From 10ceeb6163ab85da29d2625f8749352e53dd5f7c Mon Sep 17 00:00:00 2001
From: akyyy <35933136+akyyy@users.noreply.github.com>
Date: Wed, 24 Oct 2018 15:23:32 -0700
Subject: [PATCH] Add stackdriver exporter support in pkg. (#126)

* update dep golang.org/x/net
* Add stackdriver exporter support in pkg
* fix race and tests
* Use thread-safe newMetricsExporter in tests
* address cr comments
* minor fix
* checkpoint
* Address cr comments
* add test for UpdateExporterFromConfigMap
* Move lock position
* add a test case for stackdriver error check
* minor change
* removing one test case since it seems app default creds exist for presubmit tests
* replace promSrvChan with promSrv as cr suggested
* use a local var server for ListenAndServe
* add getter and setter for shared variables
* add func isMetricsConfigChanged and test
* address cr comments
* Update opencensus-go-exporter-stackdriver to v0.7.0
* Revert "Update opencensus-go-exporter-stackdriver to v0.7.0"

  This reverts commit 9825f641f885f420c8e162357e2ef315165ca02d.

* run update-deps.sh
* merge conflict
* run update-deps.sh
* Update Gopkg.toml so it doesn't have stackdriver exporter
* remove regex for metrics domain at pkg repo per cr comments
* merge conflict
---
Note: a hedged Go sketch of the exporter-update flow described by the commits above follows the Gopkg.lock diff at the end of this patch.

Gopkg.lock | 286 +- metrics/OWNERS | 6 + metrics/config.go | 123 + metrics/config_test.go | 207 + metrics/exporter.go | 149 + metrics/exporter_test.go | 121 + vendor/cloud.google.com/go/AUTHORS | 15 + vendor/cloud.google.com/go/CONTRIBUTORS | 40 + vendor/cloud.google.com/go/LICENSE | 202 + .../go/compute/metadata/metadata.go | 503 ++ .../go/internal/version/version.go | 71 + .../monitoring/apiv3/alert_policy_client.go | 279 ++ .../go/monitoring/apiv3/doc.go | 51 + .../go/monitoring/apiv3/group_client.go | 362 ++ .../go/monitoring/apiv3/metric_client.go | 453 ++ .../apiv3/notification_channel_client.go | 376 ++ .../go/monitoring/apiv3/path_funcs.go | 107 + .../monitoring/apiv3/uptime_check_client.go | 362 ++ vendor/cloud.google.com/go/trace/apiv2/doc.go | 51 + .../go/trace/apiv2/path_funcs.go | 43 + .../go/trace/apiv2/trace_client.go | 153 + .../exporter/stackdriver/AUTHORS | 1 + .../exporter/stackdriver/LICENSE | 202 + .../exporter/stackdriver/label.go | 33 + .../aws_identity_doc_utils.go | 53 + .../monitoredresource/gcp_metadata_config.go | 90 + .../monitoredresource/monitored_resources.go | 217 + .../exporter/stackdriver/sanitize.go | 50 + .../exporter/stackdriver/stackdriver.go | 278 ++ .../exporter/stackdriver/stats.go | 471 ++ .../exporter/stackdriver/trace.go | 171 + .../exporter/stackdriver/trace_proto.go | 277 ++ vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 145 + .../aws/aws-sdk-go/aws/awserr/types.go | 194 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 + .../aws-sdk-go/aws/awsutil/string_value.go | 89 + .../aws/aws-sdk-go/aws/client/client.go | 96 + .../aws-sdk-go/aws/client/default_retryer.go | 116 + .../aws/aws-sdk-go/aws/client/logger.go | 184 + .../aws/client/metadata/client_info.go | 13 + .../github.com/aws/aws-sdk-go/aws/config.go | 492 ++ .../github.com/aws/aws-sdk-go/aws/context.go | 71 + .../aws/aws-sdk-go/aws/context_1_6.go | 41 + .../aws/aws-sdk-go/aws/context_1_7.go | 9 + .../aws/aws-sdk-go/aws/convert_types.go | 387 ++ .../aws-sdk-go/aws/corehandlers/handlers.go | 228 + .../aws/corehandlers/param_validator.go | 17 + 
.../aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws/credentials/chain_provider.go | 102 + .../aws-sdk-go/aws/credentials/credentials.go | 259 + .../ec2rolecreds/ec2_role_provider.go | 178 + .../aws/credentials/endpointcreds/provider.go | 198 + .../aws/credentials/env_provider.go | 78 + .../shared_credentials_provider.go | 150 + .../aws/credentials/static_provider.go | 57 + .../stscreds/assume_role_provider.go | 298 ++ .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 + .../aws/aws-sdk-go/aws/csm/enable.go | 67 + .../aws/aws-sdk-go/aws/csm/metric.go | 53 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 + .../aws/aws-sdk-go/aws/csm/reporter.go | 242 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 + .../aws-sdk-go/aws/defaults/shared_config.go | 27 + vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 149 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 155 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 3328 +++++++++++++ .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 449 ++ .../aws/aws-sdk-go/aws/endpoints/v3model.go | 307 ++ .../aws/endpoints/v3model_codegen.go | 337 ++ .../github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 118 + .../aws/request/connection_reset_error.go | 19 + .../request/connection_reset_error_other.go | 11 + .../aws/aws-sdk-go/aws/request/handlers.go | 274 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 60 + .../aws/aws-sdk-go/aws/request/request.go | 657 +++ .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 + .../aws/aws-sdk-go/aws/request/request_1_8.go | 33 + .../aws-sdk-go/aws/request/request_context.go | 14 + .../aws/request/request_context_1_6.go | 14 + .../aws/request/request_pagination.go | 264 + .../aws/aws-sdk-go/aws/request/retryer.go | 161 + .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 261 + .../aws/aws-sdk-go/aws/request/waiter.go | 295 ++ .../aws/aws-sdk-go/aws/session/doc.go | 273 + .../aws/aws-sdk-go/aws/session/env_config.go | 219 + .../aws/aws-sdk-go/aws/session/session.go | 628 +++ .../aws-sdk-go/aws/session/shared_config.go | 295 ++ .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 796 +++ vendor/github.com/aws/aws-sdk-go/aws/types.go | 201 + vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../internal/shareddefaults/shared_config.go | 40 + .../aws/aws-sdk-go/private/protocol/host.go | 21 + .../private/protocol/idempotency.go | 75 + .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../aws-sdk-go/private/protocol/payload.go | 81 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 246 + .../private/protocol/query/unmarshal.go | 39 + .../private/protocol/query/unmarshal_error.go | 74 + .../aws-sdk-go/private/protocol/rest/build.go | 295 ++ .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 225 + 
.../aws-sdk-go/private/protocol/timestamp.go | 72 + .../aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private/protocol/xml/xmlutil/build.go | 306 ++ .../private/protocol/xml/xmlutil/unmarshal.go | 272 + .../protocol/xml/xmlutil/xml_to_struct.go | 148 + .../aws/aws-sdk-go/service/sts/api.go | 2398 +++++++++ .../aws-sdk-go/service/sts/customizations.go | 12 + .../aws/aws-sdk-go/service/sts/doc.go | 72 + .../aws/aws-sdk-go/service/sts/errors.go | 73 + .../aws/aws-sdk-go/service/sts/service.go | 95 + vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/stream.go | 316 ++ vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/error.go | 32 + vendor/github.com/go-ini/ini/file.go | 415 ++ vendor/github.com/go-ini/ini/ini.go | 215 + vendor/github.com/go-ini/ini/key.go | 751 +++ vendor/github.com/go-ini/ini/parser.go | 494 ++ vendor/github.com/go-ini/ini/section.go | 258 + vendor/github.com/go-ini/ini/struct.go | 512 ++ vendor/github.com/golang/protobuf/LICENSE | 3 - .../github.com/golang/protobuf/proto/clone.go | 46 +- .../golang/protobuf/proto/decode.go | 668 +-- .../golang/protobuf/proto/discard.go | 350 ++ .../golang/protobuf/proto/encode.go | 1207 +---- .../github.com/golang/protobuf/proto/equal.go | 30 +- .../golang/protobuf/proto/extensions.go | 208 +- .../github.com/golang/protobuf/proto/lib.go | 128 +- .../golang/protobuf/proto/message_set.go | 81 +- .../golang/protobuf/proto/pointer_reflect.go | 645 +-- .../golang/protobuf/proto/pointer_unsafe.go | 402 +- .../golang/protobuf/proto/properties.go | 438 +- .../golang/protobuf/proto/table_marshal.go | 2767 +++++++++++ .../golang/protobuf/proto/table_merge.go | 654 +++ .../golang/protobuf/proto/table_unmarshal.go | 2051 ++++++++ .../github.com/golang/protobuf/proto/text.go | 65 +- .../golang/protobuf/proto/text_parser.go | 83 +- .../protoc-gen-go/descriptor/descriptor.pb.go | 2812 +++++++++++ .../github.com/golang/protobuf/ptypes/any.go | 10 +- .../golang/protobuf/ptypes/any/any.pb.go | 51 +- .../protobuf/ptypes/duration/duration.pb.go | 53 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 79 + .../protobuf/ptypes/struct/struct.pb.go | 450 ++ .../protobuf/ptypes/timestamp/timestamp.pb.go | 55 +- .../protobuf/ptypes/wrappers/wrappers.pb.go | 443 ++ vendor/github.com/googleapis/gax-go/LICENSE | 27 + .../googleapis/gax-go/call_option.go | 157 + vendor/github.com/googleapis/gax-go/gax.go | 40 + vendor/github.com/googleapis/gax-go/header.go | 24 + vendor/github.com/googleapis/gax-go/invoke.go | 90 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + vendor/github.com/jmespath/go-jmespath/api.go | 49 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 842 ++++ .../jmespath/go-jmespath/interpreter.go | 418 ++ .../github.com/jmespath/go-jmespath/lexer.go | 420 ++ .../github.com/jmespath/go-jmespath/parser.go | 603 +++ .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../prometheus/client_golang/AUTHORS.md | 18 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 172 + .../client_golang/prometheus/desc.go | 205 + .../client_golang/prometheus/doc.go | 181 + .../prometheus/expvar_collector.go | 119 + 
.../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 140 + .../client_golang/prometheus/go_collector.go | 263 + .../client_golang/prometheus/histogram.go | 444 ++ .../client_golang/prometheus/http.go | 490 ++ .../client_golang/prometheus/metric.go | 166 + .../prometheus/process_collector.go | 142 + .../client_golang/prometheus/promhttp/http.go | 201 + .../client_golang/prometheus/registry.go | 806 +++ .../client_golang/prometheus/summary.go | 534 ++ .../client_golang/prometheus/untyped.go | 138 + .../client_golang/prometheus/value.go | 234 + .../client_golang/prometheus/vec.go | 404 ++ .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 629 +++ .../prometheus/client_model/ruby/LICENSE | 201 + vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 ++ .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 ++ .../prometheus/common/expfmt/text_parse.go | 757 +++ .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 103 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 264 + .../prometheus/common/model/value.go | 416 ++ vendor/github.com/prometheus/procfs/LICENSE | 201 + vendor/github.com/prometheus/procfs/NOTICE | 7 + .../github.com/prometheus/procfs/buddyinfo.go | 95 + vendor/github.com/prometheus/procfs/doc.go | 45 + vendor/github.com/prometheus/procfs/fs.go | 82 + .../prometheus/procfs/internal/util/parse.go | 59 + .../procfs/internal/util/sysreadfile_linux.go | 45 + vendor/github.com/prometheus/procfs/ipvs.go | 259 + vendor/github.com/prometheus/procfs/mdstat.go | 151 + .../prometheus/procfs/mountstats.go | 606 +++ .../github.com/prometheus/procfs/net_dev.go | 216 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 + .../github.com/prometheus/procfs/nfs/parse.go | 317 ++ .../prometheus/procfs/nfs/parse_nfs.go | 67 + .../prometheus/procfs/nfs/parse_nfsd.go | 89 + vendor/github.com/prometheus/procfs/proc.go | 258 + .../github.com/prometheus/procfs/proc_io.go | 65 + .../prometheus/procfs/proc_limits.go | 150 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_stat.go | 188 + vendor/github.com/prometheus/procfs/stat.go | 232 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/xfs/parse.go | 330 ++ .../github.com/prometheus/procfs/xfs/xfs.go | 163 + .../exporter/prometheus/prometheus.go | 308 ++ .../go.opencensus.io/plugin/ocgrpc/client.go | 56 + .../plugin/ocgrpc/client_metrics.go | 116 + .../plugin/ocgrpc/client_stats_handler.go | 49 + vendor/go.opencensus.io/plugin/ocgrpc/doc.go | 19 + .../go.opencensus.io/plugin/ocgrpc/server.go | 80 + .../plugin/ocgrpc/server_metrics.go | 97 + .../plugin/ocgrpc/server_stats_handler.go | 63 + .../plugin/ocgrpc/stats_common.go | 205 + .../plugin/ocgrpc/trace_common.go | 107 + .../go.opencensus.io/plugin/ochttp/client.go | 97 + 
.../plugin/ochttp/client_stats.go | 125 + vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 + .../plugin/ochttp/propagation/b3/b3.go | 123 + .../go.opencensus.io/plugin/ochttp/server.go | 230 + .../go.opencensus.io/plugin/ochttp/stats.go | 181 + .../go.opencensus.io/plugin/ochttp/trace.go | 199 + .../trace/propagation/propagation.go | 108 + vendor/golang.org/x/net/context/context.go | 2 + .../x/net/context/ctxhttp/ctxhttp.go | 74 + .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 + vendor/golang.org/x/net/http/httpguts/guts.go | 50 + .../{lex/httplex => http/httpguts}/httplex.go | 7 +- vendor/golang.org/x/net/http2/ciphers.go | 2 +- .../x/net/http2/client_conn_pool.go | 28 +- .../x/net/http2/configure_transport.go | 10 +- vendor/golang.org/x/net/http2/flow.go | 10 +- vendor/golang.org/x/net/http2/frame.go | 67 +- vendor/golang.org/x/net/http2/go111.go | 26 + vendor/golang.org/x/net/http2/go17.go | 15 + vendor/golang.org/x/net/http2/headermap.go | 20 +- vendor/golang.org/x/net/http2/hpack/encode.go | 2 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 6 + .../golang.org/x/net/http2/hpack/huffman.go | 20 +- vendor/golang.org/x/net/http2/http2.go | 25 +- vendor/golang.org/x/net/http2/not_go111.go | 17 + vendor/golang.org/x/net/http2/not_go17.go | 8 + vendor/golang.org/x/net/http2/server.go | 171 +- vendor/golang.org/x/net/http2/transport.go | 576 ++- vendor/golang.org/x/net/http2/write.go | 13 +- vendor/golang.org/x/net/idna/idna.go | 126 +- vendor/golang.org/x/net/idna/tables.go | 4396 +++++++++-------- vendor/golang.org/x/net/idna/trieval.go | 17 +- .../x/net/internal/timeseries/timeseries.go | 525 ++ vendor/golang.org/x/net/trace/events.go | 532 ++ vendor/golang.org/x/net/trace/histogram.go | 365 ++ vendor/golang.org/x/net/trace/trace.go | 1111 +++++ vendor/golang.org/x/net/trace/trace_go16.go | 21 + vendor/golang.org/x/net/trace/trace_go17.go | 21 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + .../golang.org/x/oauth2/google/appengine.go | 89 + .../x/oauth2/google/appengine_hook.go | 14 + .../x/oauth2/google/appengineflex_hook.go | 11 + vendor/golang.org/x/oauth2/google/default.go | 115 + vendor/golang.org/x/oauth2/google/doc_go19.go | 42 + .../x/oauth2/google/doc_not_go19.go | 43 + vendor/golang.org/x/oauth2/google/go19.go | 57 + vendor/golang.org/x/oauth2/google/google.go | 192 + vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/not_go19.go | 54 + vendor/golang.org/x/oauth2/google/sdk.go | 201 + .../x/oauth2/internal/client_appengine.go | 13 + vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 37 + vendor/golang.org/x/oauth2/internal/token.go | 273 + .../golang.org/x/oauth2/internal/transport.go | 34 + vendor/golang.org/x/oauth2/jws/jws.go | 182 + vendor/golang.org/x/oauth2/jwt/jwt.go | 162 + vendor/golang.org/x/oauth2/oauth2.go | 362 ++ vendor/golang.org/x/oauth2/token.go | 175 + vendor/golang.org/x/oauth2/transport.go | 144 + .../golang.org/x/sync/semaphore/semaphore.go | 131 + vendor/google.golang.org/api/AUTHORS | 10 + vendor/google.golang.org/api/CONTRIBUTORS | 55 + vendor/google.golang.org/api/LICENSE | 27 + .../googleapi/internal/uritemplates/LICENSE | 18 + .../api/googleapi/transport/apikey.go | 38 + .../google.golang.org/api/internal/creds.go | 45 + vendor/google.golang.org/api/internal/pool.go | 59 + .../api/internal/settings.go | 81 + .../api/iterator/iterator.go | 231 + .../api/option/credentials_go19.go | 32 + 
.../api/option/credentials_notgo19.go | 32 + vendor/google.golang.org/api/option/option.go | 191 + .../api/support/bundler/bundler.go | 349 ++ .../google.golang.org/api/transport/dial.go | 49 + .../google.golang.org/api/transport/go19.go | 34 + .../api/transport/grpc/dial.go | 91 + .../api/transport/grpc/dial_appengine.go | 41 + .../api/transport/grpc/go18.go | 26 + .../api/transport/grpc/not_go18.go | 21 + .../api/transport/http/dial.go | 138 + .../api/transport/http/dial_appengine.go | 30 + .../api/transport/http/go18.go | 31 + .../http/internal/propagation/http.go | 96 + .../api/transport/http/not_go18.go | 21 + .../api/transport/not_go19.go | 34 + vendor/google.golang.org/appengine/LICENSE | 202 + .../google.golang.org/appengine/appengine.go | 113 + .../appengine/appengine_vm.go | 20 + vendor/google.golang.org/appengine/errors.go | 46 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/internal/api.go | 660 +++ .../appengine/internal/api_classic.go | 169 + .../appengine/internal/api_common.go | 123 + .../appengine/internal/api_pre17.go | 682 +++ .../appengine/internal/app_id.go | 28 + .../app_identity/app_identity_service.pb.go | 611 +++ .../appengine/internal/base/api_base.pb.go | 308 ++ .../internal/datastore/datastore_v3.pb.go | 4367 ++++++++++++++++ .../appengine/internal/identity.go | 14 + .../appengine/internal/identity_classic.go | 57 + .../appengine/internal/identity_vm.go | 134 + .../appengine/internal/internal.go | 110 + .../appengine/internal/log/log_service.pb.go | 1313 +++++ .../appengine/internal/main.go | 15 + .../appengine/internal/main_vm.go | 48 + .../appengine/internal/metadata.go | 60 + .../internal/modules/modules_service.pb.go | 786 +++ .../appengine/internal/net.go | 56 + .../internal/remote_api/remote_api.pb.go | 361 ++ .../internal/socket/socket_service.pb.go | 2822 +++++++++++ .../appengine/internal/transaction.go | 115 + .../internal/urlfetch/urlfetch_service.pb.go | 527 ++ .../google.golang.org/appengine/namespace.go | 25 + .../google.golang.org/appengine/socket/doc.go | 10 + .../appengine/socket/socket_classic.go | 290 ++ .../appengine/socket/socket_vm.go | 64 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/urlfetch/urlfetch.go | 210 + vendor/google.golang.org/genproto/LICENSE | 202 + .../api/annotations/annotations.pb.go | 54 + .../googleapis/api/annotations/http.pb.go | 693 +++ .../api/distribution/distribution.pb.go | 632 +++ .../genproto/googleapis/api/label/label.pb.go | 139 + .../googleapis/api/metric/metric.pb.go | 397 ++ .../api/monitoredres/monitored_resource.pb.go | 294 ++ .../devtools/cloudtrace/v2/trace.pb.go | 1411 ++++++ .../devtools/cloudtrace/v2/tracing.pb.go | 227 + .../googleapis/monitoring/v3/alert.pb.go | 966 ++++ .../monitoring/v3/alert_service.pb.go | 672 +++ .../googleapis/monitoring/v3/common.pb.go | 898 ++++ .../monitoring/v3/dropped_labels.pb.go | 104 + .../googleapis/monitoring/v3/group.pb.go | 157 + .../monitoring/v3/group_service.pb.go | 948 ++++ .../googleapis/monitoring/v3/metric.pb.go | 234 + .../monitoring/v3/metric_service.pb.go | 1222 +++++ .../monitoring/v3/mutation_record.pb.go | 100 + .../monitoring/v3/notification.pb.go | 374 ++ .../monitoring/v3/notification_service.pb.go | 1319 +++++ .../monitoring/v3/span_context.pb.go | 99 + .../googleapis/monitoring/v3/uptime.pb.go | 969 ++++ .../monitoring/v3/uptime_service.pb.go | 793 +++ .../googleapis/rpc/status/status.pb.go | 159 + .../protobuf/field_mask/field_mask.pb.go | 281 ++ vendor/google.golang.org/grpc/AUTHORS | 1 + 
vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/backoff.go | 38 + vendor/google.golang.org/grpc/balancer.go | 391 ++ .../grpc/balancer/balancer.go | 274 + .../grpc/balancer/base/balancer.go | 208 + .../grpc/balancer/base/base.go | 52 + .../grpc/balancer/roundrobin/roundrobin.go | 79 + .../grpc/balancer_conn_wrappers.go | 300 ++ .../grpc/balancer_v1_wrapper.go | 328 ++ vendor/google.golang.org/grpc/call.go | 74 + vendor/google.golang.org/grpc/clientconn.go | 1277 +++++ vendor/google.golang.org/grpc/codec.go | 50 + .../grpc/codes/code_string.go | 62 + vendor/google.golang.org/grpc/codes/codes.go | 197 + .../grpc/connectivity/connectivity.go | 72 + .../grpc/credentials/credentials.go | 293 ++ .../grpc/credentials/go16.go | 57 + .../grpc/credentials/go17.go | 59 + .../grpc/credentials/go18.go | 46 + .../grpc/credentials/go19.go | 35 + .../grpc/credentials/oauth/oauth.go | 173 + vendor/google.golang.org/grpc/dialoptions.go | 453 ++ vendor/google.golang.org/grpc/doc.go | 24 + .../grpc/encoding/encoding.go | 118 + .../grpc/encoding/proto/proto.go | 110 + vendor/google.golang.org/grpc/go16.go | 71 + vendor/google.golang.org/grpc/go17.go | 72 + .../google.golang.org/grpc/grpclog/grpclog.go | 126 + .../google.golang.org/grpc/grpclog/logger.go | 85 + .../grpc/grpclog/loggerv2.go | 195 + vendor/google.golang.org/grpc/interceptor.go | 77 + .../grpc/internal/backoff/backoff.go | 78 + .../grpc/internal/channelz/funcs.go | 573 +++ .../grpc/internal/channelz/types.go | 419 ++ .../grpc/internal/channelz/types_linux.go | 53 + .../grpc/internal/channelz/types_nonlinux.go | 38 + .../grpc/internal/channelz/util_linux_go19.go | 39 + .../channelz/util_nonlinux_pre_go19.go | 26 + .../grpc/internal/envconfig/envconfig.go | 35 + .../grpc/internal/grpcrand/grpcrand.go | 56 + .../grpc/internal/internal.go | 28 + .../grpc/internal/transport/bdp_estimator.go | 140 + .../grpc/internal/transport/controlbuf.go | 852 ++++ .../grpc/internal/transport/defaults.go | 49 + .../grpc/internal/transport/flowcontrol.go | 218 + .../grpc/internal/transport/go16.go | 52 + .../grpc/internal/transport/go17.go | 53 + .../grpc/internal/transport/handler_server.go | 449 ++ .../grpc/internal/transport/http2_client.go | 1337 +++++ .../grpc/internal/transport/http2_server.go | 1180 +++++ .../grpc/internal/transport/http_util.go | 613 +++ .../grpc/internal/transport/log.go | 44 + .../grpc/internal/transport/transport.go | 708 +++ .../grpc/keepalive/keepalive.go | 65 + .../grpc/metadata/metadata.go | 210 + .../grpc/naming/dns_resolver.go | 290 ++ vendor/google.golang.org/grpc/naming/go17.go | 34 + vendor/google.golang.org/grpc/naming/go18.go | 28 + .../google.golang.org/grpc/naming/naming.go | 69 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 180 + vendor/google.golang.org/grpc/pickfirst.go | 108 + vendor/google.golang.org/grpc/proxy.go | 130 + .../grpc/resolver/dns/dns_resolver.go | 397 ++ .../grpc/resolver/dns/go17.go | 35 + .../grpc/resolver/dns/go18.go | 29 + .../grpc/resolver/passthrough/passthrough.go | 57 + .../grpc/resolver/resolver.go | 158 + .../grpc/resolver_conn_wrapper.go | 158 + vendor/google.golang.org/grpc/rpc_util.go | 778 +++ vendor/google.golang.org/grpc/server.go | 1447 ++++++ .../google.golang.org/grpc/service_config.go | 358 ++ .../google.golang.org/grpc/stats/handlers.go | 64 + vendor/google.golang.org/grpc/stats/stats.go | 296 ++ vendor/google.golang.org/grpc/status/go16.go | 42 + vendor/google.golang.org/grpc/status/go17.go | 44 + 
.../google.golang.org/grpc/status/status.go | 189 + vendor/google.golang.org/grpc/stream.go | 1023 ++++ vendor/google.golang.org/grpc/tap/tap.go | 51 + vendor/google.golang.org/grpc/trace.go | 113 + vendor/google.golang.org/grpc/version.go | 22 + 495 files changed, 115584 insertions(+), 5624 deletions(-) create mode 100644 metrics/OWNERS create mode 100644 metrics/config.go create mode 100644 metrics/config_test.go create mode 100644 metrics/exporter.go create mode 100644 metrics/exporter_test.go create mode 100644 vendor/cloud.google.com/go/AUTHORS create mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/cloud.google.com/go/internal/version/version.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/doc.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/path_funcs.go create mode 100644 vendor/cloud.google.com/go/trace/apiv2/trace_client.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 
100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 
vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/error.go create mode 100644 vendor/github.com/go-ini/ini/file.go create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/go-ini/ini/struct.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go create mode 100644 vendor/github.com/googleapis/gax-go/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/header.go create mode 100644 vendor/github.com/googleapis/gax-go/invoke.go create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/client_model/ruby/LICENSE create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go create mode 100644 
vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go create mode 100644 vendor/go.opencensus.io/exporter/prometheus/prometheus.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/doc.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go create mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go create mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go rename vendor/golang.org/x/net/{lex/httplex => http/httpguts}/httplex.go (97%) create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/trace/trace_go16.go create mode 100644 vendor/golang.org/x/net/trace/trace_go17.go create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/doc_go19.go create mode 100644 vendor/golang.org/x/oauth2/google/doc_not_go19.go create mode 100644 vendor/golang.org/x/oauth2/google/go19.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/not_go19.go 
create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/google.golang.org/api/AUTHORS create mode 100644 vendor/google.golang.org/api/CONTRIBUTORS create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go create mode 100644 vendor/google.golang.org/api/internal/creds.go create mode 100644 vendor/google.golang.org/api/internal/pool.go create mode 100644 vendor/google.golang.org/api/internal/settings.go create mode 100644 vendor/google.golang.org/api/iterator/iterator.go create mode 100644 vendor/google.golang.org/api/option/credentials_go19.go create mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go create mode 100644 vendor/google.golang.org/api/option/option.go create mode 100644 vendor/google.golang.org/api/support/bundler/bundler.go create mode 100644 vendor/google.golang.org/api/transport/dial.go create mode 100644 vendor/google.golang.org/api/transport/go19.go create mode 100644 vendor/google.golang.org/api/transport/grpc/dial.go create mode 100644 vendor/google.golang.org/api/transport/grpc/dial_appengine.go create mode 100644 vendor/google.golang.org/api/transport/grpc/go18.go create mode 100644 vendor/google.golang.org/api/transport/grpc/not_go18.go create mode 100644 vendor/google.golang.org/api/transport/http/dial.go create mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go create mode 100644 vendor/google.golang.org/api/transport/http/go18.go create mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go create mode 100644 vendor/google.golang.org/api/transport/http/not_go18.go create mode 100644 vendor/google.golang.org/api/transport/not_go19.go create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/api_pre17.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100644 
vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/socket/doc.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/go16.go create mode 100644 vendor/google.golang.org/grpc/credentials/go17.go create mode 100644 vendor/google.golang.org/grpc/credentials/go18.go create mode 100644 vendor/google.golang.org/grpc/credentials/go19.go create mode 100644 vendor/google.golang.org/grpc/credentials/oauth/oauth.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/go16.go create mode 100644 vendor/google.golang.org/grpc/go17.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create 
mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/go16.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/go17.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/go17.go create mode 100644 vendor/google.golang.org/grpc/naming/go18.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/go16.go create mode 100644 vendor/google.golang.org/grpc/status/go17.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go

diff --git a/Gopkg.lock b/Gopkg.lock
index b48ae0e18..0f1490be8 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,6 +1,74 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


+[[projects]]
+  digest = "1:31aae90efc63302dd03fbb716a3575f1c3c5ed6eb67de4e726c62e18bbb4eb54"
+  name = "cloud.google.com/go"
+  packages = [
+    "compute/metadata",
+    "internal/version",
+    "monitoring/apiv3",
+    "trace/apiv2",
+  ]
+  pruneopts = "NUT"
+  revision = "dfffe386c33fb24c34ee501e5723df5b97b98514"
+  version = "v0.30.0"
+
+[[projects]]
+  digest = "1:631d6f34f86c4e4cb42a37b611f25bfb4f90c5f443439eb73a9aa4ee7bc557a3"
+  name = "contrib.go.opencensus.io/exporter/stackdriver"
+  packages = [
+    ".",
+    "monitoredresource",
+  ]
+  pruneopts = "NUT"
+  revision = "2b93072101d466aa4120b3c23c2e1b08af01541c"
+  version = "v0.6.0"
+
+[[projects]]
+  digest = "1:2efd6e212ff92892a7751ba13a058aec7c1f50e49fb207d08eadb58644b49580"
+  name = "github.com/aws/aws-sdk-go"
+  packages = [
+    "aws",
+    "aws/awserr",
+    "aws/awsutil",
+    "aws/client",
+    "aws/client/metadata",
+    "aws/corehandlers",
+    "aws/credentials",
+    "aws/credentials/ec2rolecreds",
+    "aws/credentials/endpointcreds",
+    "aws/credentials/stscreds",
+    "aws/csm",
+    "aws/defaults",
+    "aws/ec2metadata",
+    "aws/endpoints",
+    "aws/request",
+    "aws/session",
+    "aws/signer/v4",
+    "internal/sdkio",
+    "internal/sdkrand",
+    "internal/sdkuri",
+    "internal/shareddefaults",
+    "private/protocol",
+    "private/protocol/query",
+    "private/protocol/query/queryutil",
+    "private/protocol/rest",
+    "private/protocol/xml/xmlutil",
+    "service/sts",
+  ]
+  pruneopts = "NUT"
+  revision = "46fe04784968cd7b54e859fcce45753456f736ac"
+  version = "v1.15.52"
+
+[[projects]]
+  branch = "master"
+  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
+  name = "github.com/beorn7/perks"
+  packages = ["quantile"]
+  pruneopts = "NUT"
+  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+
 [[projects]]
   digest = "1:6b21090f60571b20b3ddc2c8e48547dffcf409498ed6002c2cada023725ed377"
   name = "github.com/davecgh/go-spew"
@@ -15,6 +83,14 @@
   pruneopts = "NUT"
   revision = "73d445a93680fa1a78ae23a5839bad48f32ba1ee"

+[[projects]]
+  digest = "1:d32823ccbd16481c42356a1ae7f2284761da19cae3131b554a0b7c086a650292"
+  name = "github.com/go-ini/ini"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "7b294651033cd7d9e7f0d9ffa1b75ed1e198e737"
+  version = "v1.38.3"
+
 [[projects]]
   digest = "1:acaf854c7e302a7e13a665d00e6579fb72ead2ecd4932286deeac94ad3070fe7"
   name = "github.com/gobuffalo/envy"
@@ -41,17 +117,22 @@
   revision = "44145f04b68cf362d9c4df2182967c2275eaefed"

 [[projects]]
-  digest = "1:0d390d7037c2aecc37e78c2cfbe43d020d6f1fa83fd22266b7ec621189447d57"
+  digest = "1:4dacf728c83400b3e9d1d3025dd3c1e93e9a1b033726d1b193dc209f3fa9cb7a"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
+    "protoc-gen-go/descriptor",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
+    "ptypes/empty",
+    "ptypes/struct",
     "ptypes/timestamp",
+    "ptypes/wrappers",
   ]
   pruneopts = "NUT"
-  revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
+  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
+  version = "v1.2.0"

 [[projects]]
   branch = "master"
@@ -82,6 +163,14 @@
   pruneopts = "NUT"
   revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"

+[[projects]]
+  digest = "1:fe852c57b4fc4d11e6ef79bce1e930ee2f2f7d148b370afef8f8d012a80960ea"
+  name = "github.com/googleapis/gax-go"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
+  version = "v2.0.0"
+
 [[projects]]
   digest = "1:27b4ab41ffdc76ad6db56db327a4db234a59588ef059fc3fd678ba0bc6b9094f"
   name = "github.com/googleapis/gnostic"
@@ -122,6 +211,13 @@
   revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
   version = "v0.3.6"

+[[projects]]
+  digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
+  name = "github.com/jmespath/go-jmespath"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "0b12d6b5"
+
 [[projects]]
   digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a"
   name = "github.com/joho/godotenv"
@@ -161,6 +257,14 @@
   pruneopts = "NUT"
   revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f"

+[[projects]]
+  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
+  name = "github.com/matttproud/golang_protobuf_extensions"
+  packages = ["pbutil"]
+  pruneopts = "NUT"
+  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+  version = "v1.0.1"
+
 [[projects]]
   digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
@@ -193,6 +297,50 @@
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"

+[[projects]]
+  digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
+  name = "github.com/prometheus/client_golang"
+  packages = [
+    "prometheus",
+    "prometheus/promhttp",
+  ]
+  pruneopts = "NUT"
+  revision = "c5b7fccd204277076155f10851dad72b76a49317"
+  version = "v0.8.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
+  name = "github.com/prometheus/client_model"
+  packages = ["go"]
+  pruneopts = "NUT"
+  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
+
+[[projects]]
+  branch = "master"
+  digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
+  name = "github.com/prometheus/common"
+  packages = [
+    "expfmt",
+    "internal/bitbucket.org/ww/goautoneg",
+    "model",
+  ]
+  pruneopts = "NUT"
+  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
+
+[[projects]]
+  branch = "master"
+  digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
+  name = "github.com/prometheus/procfs"
+  packages = [
+    ".",
+    "internal/util",
+    "nfs",
+    "xfs",
+  ]
+  pruneopts = "NUT"
+  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
+
 [[projects]]
   digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
   name = "github.com/spf13/pflag"
@@ -202,18 +350,23 @@
   version = "v1.0.2"

 [[projects]]
-  digest = "1:f7954666f899cd89667f18da4c77458aae832e1935b056b5fe5bcf372cb3386a"
+  digest = "1:46928a6f812b22398d0599b069e8eeed11603905b0a77d8ce8f711821fbb9824"
   name = "go.opencensus.io"
   packages = [
     ".",
+    "exporter/prometheus",
     "internal",
     "internal/tagencoding",
+    "plugin/ocgrpc",
+    "plugin/ochttp",
+    "plugin/ochttp/propagation/b3",
     "stats",
     "stats/internal",
     "stats/view",
     "tag",
     "trace",
     "trace/internal",
+    "trace/propagation",
   ]
   pruneopts = "NUT"
   revision = "e262766cd0d230a1bb7c37281e345e465f19b41b"
@@ -259,23 +412,44 @@
   revision = "81e90905daefcd6fd217b62423c0908922eadb30"

 [[projects]]
-  digest = "1:866122db0f3007816dccd5b50ba0b3ae593bdadb81f725a17b95c8c6953c5cb7"
+  branch = "master"
+  digest = "1:3033eba8bb0c8f2c6720e68e4c14e55b577ae9debb5f5b7b8cc6f319d89edc82"
   name = "golang.org/x/net"
   packages = [
     "context",
+    "context/ctxhttp",
+    "http/httpguts",
     "http2",
     "http2/hpack",
     "idna",
-    "lex/httplex",
+    "internal/timeseries",
+    "trace",
   ]
   pruneopts = "NUT"
-  revision = "1c05540f6879653db88113bc4a2b70aec4bd491f"
+  revision = "49bb7cea24b1df9410e1712aa6433dae904ff66a"

 [[projects]]
   branch = "master"
-  digest = "1:39ebcc2b11457b703ae9ee2e8cca0f68df21969c6102cb3b705f76cca0ea0239"
+  digest = "1:dcb89c032286a9c3c5118a1496f8e0e237c1437f5356ac9602f6fdef560a5c21"
+  name = 
"golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "NUT" + revision = "c57b0facaced709681d9f90397429b9430a74754" + +[[projects]] + branch = "master" + digest = "1:c313aef534e493304f3666fbd24dca5932ebf776a82b7a40f961c9355794a1b1" name = "golang.org/x/sync" - packages = ["errgroup"] + packages = [ + "errgroup", + "semaphore", + ] pruneopts = "NUT" revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" @@ -332,6 +506,99 @@ pruneopts = "NUT" revision = "bfb5194568d3c40db30de765edc44cae9fc94671" +[[projects]] + branch = "master" + digest = "1:7689634b1a2940f3e725a37a7598b5462674a5b016b17d8ce22c8f71cacb0b34" + name = "google.golang.org/api" + packages = [ + "googleapi/transport", + "internal", + "iterator", + "option", + "support/bundler", + "transport", + "transport/grpc", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "NUT" + revision = "ce4acf611b3920b111e21272a15ddaea10c1fd2e" + +[[projects]] + digest = "1:300989288fc84e64d2230de8ece7a3f8aa8f5a688e75e26a186df1cba6b8cb5b" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/socket", + "internal/urlfetch", + "socket", + "urlfetch", + ] + pruneopts = "NUT" + revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06" + version = "v1.2.0" + +[[projects]] + branch = "master" + digest = "1:57005092fa4440ad8b606ba030326b499f27ab920ca645da114b1c4262bb4502" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/annotations", + "googleapis/api/distribution", + "googleapis/api/label", + "googleapis/api/metric", + "googleapis/api/monitoredres", + "googleapis/devtools/cloudtrace/v2", + "googleapis/monitoring/v3", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "NUT" + revision = "af9cb2a35e7f169ec875002c1829c9b315cddc04" + +[[projects]] + digest = "1:83db4286030add91301b4fd9cc3b06497683691223937b47510a741559bc07ef" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "credentials/oauth", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "NUT" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" + [[projects]] digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185" name = "gopkg.in/inf.v0" @@ -662,12 +929,14 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "contrib.go.opencensus.io/exporter/stackdriver", "github.com/golang/glog", "github.com/google/go-cmp/cmp", "github.com/google/go-cmp/cmp/cmpopts", "github.com/knative/test-infra", "github.com/markbates/inflect", "github.com/mattbaird/jsonpatch", + "go.opencensus.io/exporter/prometheus", "go.opencensus.io/stats", "go.opencensus.io/stats/view", "go.opencensus.io/tag", @@ -676,6 +945,7 @@ "go.uber.org/zap/zapcore", "go.uber.org/zap/zaptest", "golang.org/x/sync/errgroup", + "google.golang.org/genproto/googleapis/api/monitoredres", "k8s.io/api/admission/v1beta1", "k8s.io/api/admissionregistration/v1beta1", "k8s.io/api/apps/v1", diff --git a/metrics/OWNERS b/metrics/OWNERS new 
file mode 100644 index 000000000..40668b6ab --- /dev/null +++ b/metrics/OWNERS @@ -0,0 +1,6 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- mdemirhan +- wuyao +- yanweiguo diff --git a/metrics/config.go b/metrics/config.go new file mode 100644 index 000000000..73e92b06b --- /dev/null +++ b/metrics/config.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + "fmt" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +const ( + backendDestinationKey = "metrics.backend-destination" + stackdriverProjectIDKey = "metrics.stackdriver-project-id" +) + +type MetricsBackend string + +const ( + // Stackdriver is the Stackdriver metrics backend. + Stackdriver MetricsBackend = "stackdriver" + // Prometheus is the Prometheus metrics backend. + Prometheus MetricsBackend = "prometheus" +) + +type metricsConfig struct { + // The metrics domain, e.g. "serving.knative.dev" or "build.knative.dev". + domain string + // The component that emits the metrics, e.g. "activator", "autoscaler". + component string + // The metrics backend destination. + backendDestination MetricsBackend + // The stackdriver project ID where the stats data are uploaded to. This is + // not necessarily the GCP project ID in which the service runs. + stackdriverProjectID string +} + +func getMetricsConfig(m map[string]string, domain string, component string, logger *zap.SugaredLogger) (*metricsConfig, error) { + var mc metricsConfig + backend, ok := m[backendDestinationKey] + if !ok { + return nil, errors.New("metrics.backend-destination key is missing") + } + lb := MetricsBackend(strings.ToLower(backend)) + switch lb { + case Stackdriver, Prometheus: + mc.backendDestination = lb + default: + return nil, fmt.Errorf("Unsupported metrics backend value \"%s\"", backend) + } + + if mc.backendDestination == Stackdriver { + sdProj, ok := m[stackdriverProjectIDKey] + if !ok || sdProj == "" { + return nil, errors.New("For backend stackdriver, metrics.stackdriver-project-id field must exist and cannot be empty") + } + mc.stackdriverProjectID = sdProj + } + + if domain == "" { + return nil, errors.New("Metrics domain cannot be empty") + } + mc.domain = domain + + if component == "" { + return nil, errors.New("Metrics component name cannot be empty") + } + mc.component = component + return &mc, nil +} + +// UpdateExporterFromConfigMap returns a helper func that can be used to update the exporter +// when a ConfigMap is updated. +func UpdateExporterFromConfigMap(domain string, component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) { + return func(configMap *corev1.ConfigMap) { + newConfig, err := getMetricsConfig(configMap.Data, domain, component, logger) + if err != nil { + ce := getCurMetricsExporter() + if ce == nil { + // Fail the process if no exporter exists yet.
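A minimal usage sketch of the pieces defined in this file, assuming hypothetical domain, component, and project values; the `metrics.*` keys and the `UpdateExporterFromConfigMap` signature are the ones introduced above, and the ConfigMap name mirrors the one used in the tests further down:

```go
package main

import (
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/knative/pkg/metrics"
)

func main() {
	logger := zap.NewExample().Sugar()

	// In a controller, this func would typically be registered as the watch
	// callback for the "config-observability" ConfigMap; here it is called directly.
	update := metrics.UpdateExporterFromConfigMap("serving.knative.dev", "autoscaler", logger)

	update(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "config-observability"},
		Data: map[string]string{
			"metrics.backend-destination":    "prometheus",  // or "stackdriver"
			"metrics.stackdriver-project-id": "my-gcp-proj", // hypothetical; only needed for stackdriver
		},
	})
}
```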
+ logger.Fatal("Failed to get a valid metrics config") + } else { + logger.Error("Failed to get a valid metrics config; skipping the metrics exporter update", zap.Error(err)) + return + } + } + + if isMetricsConfigChanged(newConfig) { + if err := newMetricsExporter(newConfig, logger); err != nil { + logger.Error("Failed to create a new metrics exporter from the metrics config.", zap.Error(err)) + return + } + } + } +} + +// isMetricsConfigChanged compares the non-nil newConfig against curMetricsConfig. When the backend changes, +// or the Stackdriver project ID changes for the Stackdriver backend, the metrics exporter must be updated. +func isMetricsConfigChanged(newConfig *metricsConfig) bool { + cc := getCurMetricsConfig() + if cc == nil || newConfig.backendDestination != cc.backendDestination { + return true + } else if newConfig.backendDestination == Stackdriver && newConfig.stackdriverProjectID != cc.stackdriverProjectID { + return true + } + return false +} diff --git a/metrics/config_test.go b/metrics/config_test.go new file mode 100644 index 000000000..b5e02340f --- /dev/null +++ b/metrics/config_test.go @@ -0,0 +1,207 @@ +/* +Copyright 2018 The Knative Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package metrics + +import ( + "reflect" + "testing" + + . "github.com/knative/pkg/logging/testing" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + servingDomain = "serving.knative.dev" + badDomain = "test.domain" + testComponent = "testComponent" + testProj = "test-project" + anotherProj = "another-project" +) + +var ( + errorTests = []struct { + name string + cm map[string]string + domain string + component string + expectedErr string + }{{ + name: "backendKeyMissing", + cm: map[string]string{"": ""}, + domain: servingDomain, + component: testComponent, + expectedErr: "metrics.backend-destination key is missing", + }, { + name: "stackdriverProjectIDMissing", + cm: map[string]string{"metrics.backend-destination": "stackdriver"}, + domain: servingDomain, + component: testComponent, + expectedErr: "For backend stackdriver, metrics.stackdriver-project-id field must exist and cannot be empty", + }, { + name: "stackdriverProjectIDEmpty", + cm: map[string]string{ + "metrics.backend-destination": "stackdriver", + "metrics.stackdriver-project-id": "", + }, + domain: servingDomain, + component: testComponent, + expectedErr: "For backend stackdriver, metrics.stackdriver-project-id field must exist and cannot be empty", + }, { + name: "unsupportedBackend", + cm: map[string]string{ + "metrics.backend-destination": "unsupported", + "metrics.stackdriver-project-id": testProj, + }, + domain: servingDomain, + component: testComponent, + expectedErr: "Unsupported metrics backend value \"unsupported\"", + }, { + name: "emptyDomain", + cm: map[string]string{ + "metrics.backend-destination": "prometheus", + }, + domain: "", + component: testComponent, + expectedErr: "Metrics domain cannot be empty", + }, { + name: "invalidComponent", + cm: map[string]string{
"metrics.backend-destination": "prometheus", + }, + domain: servingDomain, + component: "", + expectedErr: "Metrics component name cannot be empty", + }} + successTests = []struct { + name string + cm map[string]string + domain string + component string + expectedConfig metricsConfig + }{{ + name: "validPrometheus", + cm: map[string]string{"metrics.backend-destination": "prometheus"}, + domain: servingDomain, + component: testComponent, + expectedConfig: metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Prometheus}, + }, { + name: "validStackdriver", + cm: map[string]string{"metrics.backend-destination": "stackdriver", + "metrics.stackdriver-project-id": anotherProj}, + domain: servingDomain, + component: testComponent, + expectedConfig: metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: anotherProj}, + }, { + name: "validCapitalStackdriver", + cm: map[string]string{"metrics.backend-destination": "Stackdriver", + "metrics.stackdriver-project-id": testProj}, + domain: servingDomain, + component: testComponent, + expectedConfig: metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: testProj}, + }} +) + +func TestGetMetricsConfig(t *testing.T) { + for _, test := range errorTests { + _, err := getMetricsConfig(test.cm, test.domain, test.component, TestLogger(t)) + if err.Error() != test.expectedErr { + t.Errorf("In test %v, wanted err: %v, got: %v", test.name, test.expectedErr, err) + } + } + + for _, test := range successTests { + mc, err := getMetricsConfig(test.cm, test.domain, test.component, TestLogger(t)) + if err != nil { + t.Errorf("In test %v, wanted valid config %v, got error %v", test.name, test.expectedConfig, err) + } + if !reflect.DeepEqual(*mc, test.expectedConfig) { + t.Errorf("In test %v, wanted config %v, got config %v", test.name, test.expectedConfig, *mc) + } + } +} + +func TestIsMetricsConfigChanged(t *testing.T) { + setCurMetricsExporterAndConfig(nil, nil) + for _, test := range successTests { + mc, err := getMetricsConfig(test.cm, test.domain, test.component, TestLogger(t)) + if err != nil { + t.Errorf("In test %v, wanted valid config %v, got error %v", test.name, test.expectedConfig, err) + } + changed := isMetricsConfigChanged(mc) + if !changed { + t.Errorf("In test %v, isMetricsConfigChanged should be true", test.name) + } + setCurMetricsExporterAndConfig(nil, mc) + } + + setCurMetricsExporterAndConfig(nil, &metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Prometheus}) + newConfig := &metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Prometheus, + stackdriverProjectID: testProj, + } + changed := isMetricsConfigChanged(newConfig) + if changed { + t.Error("isMetricsConfigChanged should be false if stackdriver project ID changes for prometheus backend") + } +} + +func TestUpdateExporterFromConfigMap(t *testing.T) { + setCurMetricsExporterAndConfig(nil, nil) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-observability", + }, + Data: map[string]string{}, + } + oldConfig := getCurMetricsConfig() + for _, test := range successTests { + cm.Data = test.cm + u := UpdateExporterFromConfigMap(test.domain, test.component, TestLogger(t)) + u(cm) + mConfig := getCurMetricsConfig() + if mConfig == oldConfig { + t.Errorf("In test %v, expected metrics config change", test.name) + } + if 
!reflect.DeepEqual(*mConfig, test.expectedConfig) { + t.Errorf("In test %v, expected config: %v; got config %v", test.name, test.expectedConfig, mConfig) + } + oldConfig = mConfig + } + + for _, test := range errorTests { + cm.Data = test.cm + u := UpdateExporterFromConfigMap(test.domain, test.component, TestLogger(t)) + u(cm) + mConfig := getCurMetricsConfig() + if mConfig != oldConfig { + t.Errorf("In test %v, mConfig should not change", test.name) + } + } +} diff --git a/metrics/exporter.go b/metrics/exporter.go new file mode 100644 index 000000000..11abe8247 --- /dev/null +++ b/metrics/exporter.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net/http" + "sync" + "time" + + "contrib.go.opencensus.io/exporter/stackdriver" + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/stats/view" + "go.uber.org/zap" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +var ( + curMetricsExporter view.Exporter + curMetricsConfig *metricsConfig + curPromSrv *http.Server + metricsMux sync.Mutex +) + +// newMetricsExporter gets a metrics exporter based on the config. +func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) error { + // If there is a Prometheus Exporter server running, stop it. + resetCurPromSrv() + ce := getCurMetricsExporter() + if ce != nil { + // UnregisterExporter is idempotent and it can be called multiple times for the same exporter + // without side effects. 
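Since newMetricsExporter (continued below) unregisters any previous exporter before installing the new one, switching backends at runtime is just a second call with a different config. The following test-style sketch, not part of the change and using illustrative values, exercises the Prometheus path end to end, including scraping the :9090 endpoint that startNewPromSrv wires up further down:

```go
package metrics

import (
	"io/ioutil"
	"net/http"
	"testing"
	"time"

	. "github.com/knative/pkg/logging/testing"
)

func TestPrometheusEndToEndSketch(t *testing.T) {
	// Create (or switch to) a Prometheus exporter; any previous exporter is
	// unregistered and any previous Prometheus server is shut down first.
	if err := newMetricsExporter(&metricsConfig{
		domain:             "serving.knative.dev",
		component:          "testComponent",
		backendDestination: Prometheus,
	}, TestLogger(t)); err != nil {
		t.Fatal(err)
	}
	// The scrape server starts asynchronously on :9090 (see startNewPromSrv).
	time.Sleep(200 * time.Millisecond)
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	t.Logf("scraped %d bytes from the Prometheus endpoint", len(body))
}
```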
+ view.UnregisterExporter(ce) + } + var err error + var e view.Exporter + switch config.backendDestination { + case Stackdriver: + e, err = newStackdriverExporter(config, logger) + case Prometheus: + e, err = newPrometheusExporter(config, logger) + default: + err = fmt.Errorf("Unsupported metrics backend %v", config.backendDestination) + } + if err != nil { + return err + } + existingConfig := getCurMetricsConfig() + setCurMetricsExporterAndConfig(e, config) + logger.Infof("Successfully updated the metrics exporter; old config: %v; new config: %v", existingConfig, config) + return nil +} + +func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + e, err := stackdriver.NewExporter(stackdriver.Options{ + ProjectID: config.stackdriverProjectID, + MetricPrefix: config.domain + "/" + config.component, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + DefaultMonitoringLabels: &stackdriver.Labels{}, + }) + if err != nil { + logger.Error("Failed to create the Stackdriver exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created OpenCensus Stackdriver exporter with config %v", config) + return e, nil +} + +func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) { + e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component}) + if err != nil { + logger.Error("Failed to create the Prometheus exporter.", zap.Error(err)) + return nil, err + } + logger.Infof("Created OpenCensus Prometheus exporter with config: %v. Starting the server for the Prometheus exporter.", config) + // Start the server for Prometheus scraping + go func() { + srv := startNewPromSrv(e) + srv.ListenAndServe() + }() + return e, nil +} + +func getCurPromSrv() *http.Server { + metricsMux.Lock() + defer metricsMux.Unlock() + return curPromSrv +} + +func resetCurPromSrv() { + metricsMux.Lock() + defer metricsMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + curPromSrv = nil + } +} + +func startNewPromSrv(e *prometheus.Exporter) *http.Server { + sm := http.NewServeMux() + sm.Handle("/metrics", e) + metricsMux.Lock() + defer metricsMux.Unlock() + if curPromSrv != nil { + curPromSrv.Close() + } + curPromSrv = &http.Server{ + Addr: ":9090", + Handler: sm, + } + return curPromSrv +} + +func getCurMetricsExporter() view.Exporter { + metricsMux.Lock() + defer metricsMux.Unlock() + return curMetricsExporter +} + +func setCurMetricsExporterAndConfig(e view.Exporter, c *metricsConfig) { + metricsMux.Lock() + defer metricsMux.Unlock() + view.RegisterExporter(e) + view.SetReportingPeriod(60 * time.Second) + curMetricsExporter = e + curMetricsConfig = c +} + +func getCurMetricsConfig() *metricsConfig { + metricsMux.Lock() + defer metricsMux.Unlock() + return curMetricsConfig +} diff --git a/metrics/exporter_test.go b/metrics/exporter_test.go new file mode 100644 index 000000000..9553c293c --- /dev/null +++ b/metrics/exporter_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Knative Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package metrics + +import ( + "testing" + "time" + + . "github.com/knative/pkg/logging/testing" +) + +func TestNewStackdriverExporter(t *testing.T) { + // The stackdriver project ID is required for the stackdriver exporter. + e, err := newStackdriverExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: testProj}, TestLogger(t)) + if err != nil { + t.Error(err) + } + if e == nil { + t.Error("expected a non-nil metrics exporter") + } + expectNoPromSrv(t) +} + +func TestNewPrometheusExporter(t *testing.T) { + // The stackdriver project ID is not required for the prometheus exporter. + e, err := newPrometheusExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Prometheus, + stackdriverProjectID: ""}, TestLogger(t)) + if err != nil { + t.Error(err) + } + if e == nil { + t.Error("expected a non-nil metrics exporter") + } + expectPromSrv(t) +} + +func TestMetricsExporter(t *testing.T) { + err := newMetricsExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: "unsupported", + stackdriverProjectID: ""}, TestLogger(t)) + if err == nil { + t.Error("Expected an error for an unsupported backend") + } + + err = newMetricsExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: testProj}, TestLogger(t)) + if err != nil { + t.Error(err) + } +} + +func TestInterleavedExporters(t *testing.T) { + // First create a stackdriver exporter + err := newMetricsExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: testProj}, TestLogger(t)) + if err != nil { + t.Error(err) + } + expectNoPromSrv(t) + // Then switch to prometheus exporter + err = newMetricsExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Prometheus, + stackdriverProjectID: "", + }, TestLogger(t)) + if err != nil { + t.Error(err) + } + expectPromSrv(t) + // Finally switch to stackdriver exporter + err = newMetricsExporter(&metricsConfig{ + domain: servingDomain, + component: testComponent, + backendDestination: Stackdriver, + stackdriverProjectID: testProj}, TestLogger(t)) + if err != nil { + t.Error(err) + } +} + +func expectPromSrv(t *testing.T) { + time.Sleep(200 * time.Millisecond) + srv := getCurPromSrv() + if srv == nil { + t.Error("expected a server for prometheus exporter") + } +} + +func expectNoPromSrv(t *testing.T) { + time.Sleep(200 * time.Millisecond) + srv := getCurPromSrv() + if srv != nil { + t.Error("expected no server for stackdriver exporter") + } +} diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 000000000..c364af1da --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc.
+Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 000000000..3b3cbed98 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,40 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +James Hall +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Mario Castro +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..9d0660be4 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,503 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. 
+// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, defaultClient.hc, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. 
+ return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. 
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. 
+func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
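For orientation, a minimal sketch of how this vendored metadata package is typically consumed, assuming the process runs on a GCE VM; OnGCE, ProjectID, and Zone are the package-level helpers defined in this file:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server over HTTP and DNS, as in testOnGCE above.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}
	proj, err := metadata.ProjectID() // cached after the first successful fetch
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %s, zone %s\n", proj, zone)
}
```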
+func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 000000000..220f02c1e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20180226" + +// Go returns the Go runtime version. The returned string +// has no whitespace. 
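The Subscribe helper above long-polls the metadata server with wait_for_change=true and the last seen ETag. A minimal sketch of watching an instance attribute; the attribute name is a hypothetical placeholder:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Blocks, invoking the callback once immediately and again on every change,
	// until the callback returns a non-nil error or the value is deleted.
	err := metadata.Subscribe("instance/attributes/my-flag", func(v string, ok bool) error {
		if !ok {
			return fmt.Errorf("attribute was deleted")
		}
		log.Printf("new value: %q", v)
		return nil
	})
	log.Fatal(err)
}
```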
+func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go new file mode 100644 index 000000000..f0724b4c4 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go @@ -0,0 +1,279 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. +type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &AlertPolicyCallOptions{ + ListAlertPolicies: retry[[2]string{"default", "idempotent"}], + GetAlertPolicy: retry[[2]string{"default", "idempotent"}], + CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}], + UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API. 
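+//
+// A minimal construction sketch (error handling abbreviated):
+//
+//    ctx := context.Background()
+//    client, err := monitoring.NewAlertPolicyClient(ctx)
+//    if err != nil {
+//        // handle err
+//    }
+//    defer client.Close()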
+// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type AlertPolicyClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewAlertPolicyClient creates a new alert policy service client. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Stackdriver Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be "unhealthy" and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the "Monitoring" tab in +// Cloud Console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &AlertPolicyClient{ + conn: conn, + CallOptions: defaultAlertPolicyCallOptions(), + + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListAlertPolicies lists the existing alerting policies for the project. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + var resp *monitoringpb.ListAlertPoliciesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) 
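+ // gax.Invoke above applied the configured retry/backoff settings; an error surviving it is final for this page fetch.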
+ if err != nil { + return nil, "", err + } + return resp.AlertPolicies, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateAlertPolicy creates a new alerting policy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteAlertPolicy deletes an alerting policy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. 
+type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 000000000..dee265c60 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Stackdriver Monitoring API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
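+//
+// A client can be constructed with narrower scopes, for example (sketch):
+//
+//    client, err := monitoring.NewMetricClient(ctx,
+//        option.WithScopes("https://www.googleapis.com/auth/monitoring.read"))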
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 000000000..1d364c073 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,362 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &GroupCallOptions{ + ListGroups: retry[[2]string{"default", "idempotent"}], + GetGroup: retry[[2]string{"default", "idempotent"}], + CreateGroup: retry[[2]string{"default", "non_idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + DeleteGroup: retry[[2]string{"default", "idempotent"}], + ListGroupMembers: retry[[2]string{"default", "idempotent"}], + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type GroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The x-goog-* metadata to be sent with each request. 
+ xGoogMetadata metadata.MD +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// groups (at #google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + conn: conn, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) + it := &GroupIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetGroup gets a single group. 
+func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) 
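+ // A terminal error from the retried call propagates out of InternalFetch and is surfaced by the iterator's Next.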
+ if err != nil { + return nil, "", err + } + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
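+//
+// A typical consumption loop (sketch):
+//
+//    for {
+//        r, err := it.Next()
+//        if err == iterator.Done {
+//            break
+//        }
+//        if err != nil {
+//            return err
+//        }
+//        _ = r // use the MonitoredResource
+//    }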
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 000000000..7234d6dd4 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,453 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. 
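+// By default (see defaultMetricCallOptions below), idempotent methods retry on
+// DeadlineExceeded and Unavailable with exponential backoff: 100ms initial
+// delay, a 60s cap, and a 1.3 multiplier per attempt.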
+type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], + ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], + GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], + CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], + DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], + ListTimeSeries: retry[[2]string{"default", "idempotent"}], + CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type MetricClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricClient{ + conn: conn, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. 
+// This method does not require a Stackdriver account.
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
+ it := &MonitoredResourceDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ResourceDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account.
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...)
+ var resp *monitoredrespb.MonitoredResourceDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account.
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...)
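+ // The three-index slice above (s[0:len:len]) caps capacity at length, so append copies into fresh storage instead of growing the shared CallOptions slice in place.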
+ it := &MetricDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. 
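+//
+// A request sketch (name and filter are placeholders; a real request also
+// sets the time interval):
+//
+//    it := client.ListTimeSeries(ctx, &monitoringpb.ListTimeSeriesRequest{
+//        Name:   "projects/my-project",
+//        Filter: `metric.type = "custom.googleapis.com/my/metric"`,
+//    })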
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) + it := &TimeSeriesIterator{} + req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go new file mode 100644 index 000000000..53bcf6bcb --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go @@ -0,0 +1,376 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. 
+type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}], + GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}], + ListNotificationChannels: retry[[2]string{"default", "idempotent"}], + GetNotificationChannel: retry[[2]string{"default", "idempotent"}], + CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}], + } +} + +// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type NotificationChannelClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewNotificationChannelClient creates a new notification channel service client. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &NotificationChannelClient{ + conn: conn, + CallOptions: defaultNotificationChannelCallOptions(), + + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
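+// The resulting header value is a space-separated list of key/value pairs,
+// of the form "gl-go/<goVersion> gapic/<repoVersion> gax/<gaxVersion> grpc/<grpcVersion>".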
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) + it := &NotificationChannelDescriptorIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + var resp *monitoringpb.ListNotificationChannelDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ChannelDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNotificationChannels lists the notification channels that have been created for the project. 
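+// Results are paginated: the returned iterator fetches further pages lazily,
+// and req.PageSize, when set, seeds the iterator's page size (PageInfo().MaxSize).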
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) + it := &NotificationChannelIterator{} + req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + var resp *monitoringpb.ListNotificationChannelsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.NotificationChannels, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or pagerduty service. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. +func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNotificationChannel deletes a notification channel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. 
+ // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go new file mode 100644 index 000000000..b2b514ba5 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go @@ -0,0 +1,107 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monitoring + +// GroupProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func GroupProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// GroupGroupPath returns the path for the group resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/groups/%s", project, group) +// instead. +func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// MetricProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func MetricProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor) +// instead. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor) +// instead. 
+func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" +} + +// UptimeCheckProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func UptimeCheckProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig) +// instead. +func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string { + return "" + + "projects/" + + project + + "/uptimeCheckConfigs/" + + uptimeCheckConfig + + "" +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go new file mode 100644 index 000000000..edbb1165e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go @@ -0,0 +1,362 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient. 
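+//
+// The defaults can be overridden on a per-call basis by passing additional
+// gax.CallOptions; a hedged sketch (the client c, ctx and req are assumed to
+// exist already):
+//
+//   cfg, err := c.GetUptimeCheckConfig(ctx, req, gax.WithRetry(func() gax.Retryer {
+//           return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//                   Initial:    50 * time.Millisecond,
+//                   Max:        time.Second,
+//                   Multiplier: 2.0,
+//           })
+//   }))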
+type UptimeCheckCallOptions struct { + ListUptimeCheckConfigs []gax.CallOption + GetUptimeCheckConfig []gax.CallOption + CreateUptimeCheckConfig []gax.CallOption + UpdateUptimeCheckConfig []gax.CallOption + DeleteUptimeCheckConfig []gax.CallOption + ListUptimeCheckIps []gax.CallOption +} + +func defaultUptimeCheckClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &UptimeCheckCallOptions{ + ListUptimeCheckConfigs: retry[[2]string{"default", "idempotent"}], + GetUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + CreateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + UpdateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}], + DeleteUptimeCheckConfig: retry[[2]string{"default", "idempotent"}], + ListUptimeCheckIps: retry[[2]string{"default", "idempotent"}], + } +} + +// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type UptimeCheckClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + uptimeCheckClient monitoringpb.UptimeCheckServiceClient + + // The call options for this service. + CallOptions *UptimeCheckCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewUptimeCheckClient creates a new uptime check service client. +// +// The UptimeCheckService API is used to manage (list, create, delete, edit) +// uptime check configurations in the Stackdriver Monitoring product. An uptime +// check is a piece of configuration that determines which resources and +// services to monitor for availability. These configurations can also be +// configured interactively by navigating to the [Cloud Console] +// (http://console.cloud.google.com), selecting the appropriate project, +// clicking on "Monitoring" on the left-hand side to navigate to Stackdriver, +// and then clicking on "Uptime". +func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultUptimeCheckClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &UptimeCheckClient{ + conn: conn, + CallOptions: defaultUptimeCheckCallOptions(), + + uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *UptimeCheckClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *UptimeCheckClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListUptimeCheckConfigs lists the existing valid uptime check configurations for the project, +// leaving out any invalid configurations. +func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...) + it := &UptimeCheckConfigIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) { + var resp *monitoringpb.ListUptimeCheckConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.UptimeCheckConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// GetUptimeCheckConfig gets a single uptime check configuration. +func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateUptimeCheckConfig creates a new uptime check configuration. +func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateUptimeCheckConfig updates an uptime check configuration. 
You can either replace the entire +// configuration with a new one or replace only certain fields in the current +// configuration by specifying the fields to be updated via "updateMask". +// Returns the updated configuration. +func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...) + var resp *monitoringpb.UptimeCheckConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteUptimeCheckConfig deletes an uptime check configuration. Note that this method will fail +// if the uptime check configuration is referenced by an alert policy or +// other dependent configs that would be rendered invalid by the deletion. +func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListUptimeCheckIps returns the list of IPs that checkers run from +func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...) + it := &UptimeCheckIpIterator{} + req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) { + var resp *monitoringpb.ListUptimeCheckIpsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.UptimeCheckIps, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + return it +} + +// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig. 
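+//
+// A typical consumption loop over this iterator (illustrative only):
+//
+//   for {
+//           cfg, err := it.Next()
+//           if err == iterator.Done {
+//                   break
+//           }
+//           if err != nil {
+//                   break // TODO: handle error
+//           }
+//           _ = cfg
+//   }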
+type UptimeCheckConfigIterator struct { + items []*monitoringpb.UptimeCheckConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) { + var item *monitoringpb.UptimeCheckConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp. +type UptimeCheckIpIterator struct { + items []*monitoringpb.UptimeCheckIp + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) { + var item *monitoringpb.UptimeCheckIp + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *UptimeCheckIpIterator) bufLen() int { + return len(it.items) +} + +func (it *UptimeCheckIpIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go new file mode 100644 index 000000000..2f7ffdc01 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go @@ -0,0 +1,51 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package trace is an auto-generated package for the
+// Stackdriver Trace API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Sends application trace data to Stackdriver Trace for viewing. Trace data
+// is collected for all App Engine applications by default. Trace data from
+// other applications can be provided using this API.
+package trace // import "cloud.google.com/go/trace/apiv2"
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+		"https://www.googleapis.com/auth/trace.append",
+	}
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
new file mode 100644
index 000000000..80b8d40b5
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+// ProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+//	fmt.Sprintf("projects/%s", project)
+// instead.
+func ProjectPath(project string) string {
+	return "" +
+		"projects/" +
+		project +
+		""
+}
+
+// SpanPath returns the path for the span resource.
+//
+// Deprecated: Use
+//	fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span)
+// instead.
+func SpanPath(project, trace, span string) string {
+	return "" +
+		"projects/" +
+		project +
+		"/traces/" +
+		trace +
+		"/spans/" +
+		span +
+		""
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
new file mode 100644
index 000000000..27a016218
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
@@ -0,0 +1,153 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + BatchWriteSpans []gax.CallOption + CreateSpan []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}], + CreateSpan: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. A single trace may +// contain span(s) from multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchWriteSpans sends new spans to new or existing traces. You cannot update +// existing spans. +func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateSpan creates a new span. +func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) + var resp *cloudtracepb.Span + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go new file mode 100644 index 000000000..88835cc0f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +// Labels represents a set of Stackdriver Monitoring labels. 
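+//
+// A short sketch of how a Labels value is populated (the keys, values and
+// descriptions below are purely illustrative):
+//
+//   var labels Labels
+//   labels.Set("environment", "production", "deployment environment")
+//   labels.Set("service_version", "v1", "version of the running service")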
+type Labels struct {
+	m map[string]labelValue
+}
+
+type labelValue struct {
+	val, desc string
+}
+
+// Set stores a label with the given key, value and description,
+// overwriting any previous values with the given key.
+func (labels *Labels) Set(key, value, description string) {
+	if labels.m == nil {
+		labels.m = make(map[string]labelValue)
+	}
+	labels.m[key] = labelValue{value, description}
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
new file mode 100644
index 000000000..d6a23a8cf
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
@@ -0,0 +1,53 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+// awsIdentityDocument is used to store parsed AWS Identity Document.
+type awsIdentityDocument struct {
+	// accountID is the AWS account number for the VM.
+	accountID string
+
+	// instanceID is the ID of the EC2 instance.
+	instanceID string
+
+	// Region is the AWS region for the VM.
+	region string
+}
+
+// retrieveAWSIdentityDocument attempts to retrieve the AWS Identity Document.
+// If the environment is an AWS EC2 instance, a valid document is retrieved.
+// Relevant attributes from the document are stored in awsIdentityDoc.
+// This is only done once.
+func retrieveAWSIdentityDocument() *awsIdentityDocument {
+	awsIdentityDoc := awsIdentityDocument{}
+	c := ec2metadata.New(session.New())
+	if !c.Available() {
+		return nil
+	}
+	ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument()
+	if err != nil {
+		return nil
+	}
+	awsIdentityDoc.region = ec2InstanceIdentityDocument.Region
+	awsIdentityDoc.instanceID = ec2InstanceIdentityDocument.InstanceID
+	awsIdentityDoc.accountID = ec2InstanceIdentityDocument.AccountID
+
+	return &awsIdentityDoc
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
new file mode 100644
index 000000000..ceb754e51
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -0,0 +1,90 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+	"log"
+	"os"
+	"strings"
+
+	"cloud.google.com/go/compute/metadata"
+)
+
+// gcpMetadata represents metadata retrieved from the GCP (GKE and GCE) environment.
+type gcpMetadata struct {
+
+	// projectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	projectID string
+
+	// instanceID is the numeric VM instance identifier assigned by Compute Engine.
+	instanceID string
+
+	// clusterName is the name for the cluster the container is running in.
+	clusterName string
+
+	// containerName is the name of the container.
+	containerName string
+
+	// namespaceID is the identifier for the cluster namespace the container is running in.
+	namespaceID string
+
+	// podID is the identifier for the pod the container is running in.
+	podID string
+
+	// zone is the Compute Engine zone in which the VM is running.
+	zone string
+}
+
+// retrieveGCPMetadata retrieves the value of each attribute from the metadata
+// server in GKE container and GCE instance environments.
+// Some attributes are retrieved from the system environment.
+// This is executed only once, guarded by detectOnce.
+func retrieveGCPMetadata() *gcpMetadata {
+	gcpMetadata := gcpMetadata{}
+	var err error
+	gcpMetadata.instanceID, err = metadata.InstanceID()
+	if err != nil {
+		// Not a GCP environment
+		return &gcpMetadata
+	}
+
+	gcpMetadata.projectID, err = metadata.ProjectID()
+	logError(err)
+
+	gcpMetadata.zone, err = metadata.Zone()
+	logError(err)
+
+	clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+	logError(err)
+	gcpMetadata.clusterName = strings.TrimSpace(clusterName)
+
+	// The following attributes are derived from environment variables. They are
+	// configured via a yaml file. For details refer to:
+	// https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application
+	gcpMetadata.namespaceID = os.Getenv("NAMESPACE")
+	gcpMetadata.containerName = os.Getenv("CONTAINER_NAME")
+	gcpMetadata.podID = os.Getenv("HOSTNAME")
+
+	return &gcpMetadata
+}
+
+// logError logs the error only if it is present and is not a 'not defined' error.
+func logError(err error) {
+	if err != nil {
+		if !strings.Contains(err.Error(), "not defined") {
+			log.Printf("Error retrieving gcp metadata: %v", err)
+		}
+	}
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
new file mode 100644
index 000000000..c07e55ce0
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
@@ -0,0 +1,217 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+	"fmt"
+	"os"
+	"sync"
+)
+
+// Interface is implemented by types that represent a monitored resource.
+type Interface interface {
+
+	// MonitoredResource returns the resource type and resource labels.
+	MonitoredResource() (resType string, labels map[string]string)
+}
+
+// GKEContainer represents gke_container type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gke_container
+type GKEContainer struct {
+
+	// ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	ProjectID string
+
+	// InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+	InstanceID string
+
+	// ClusterName is the name for the cluster the container is running in.
+	ClusterName string
+
+	// ContainerName is the name of the container.
+	ContainerName string
+
+	// NamespaceID is the identifier for the cluster namespace the container is running in.
+	NamespaceID string
+
+	// PodID is the identifier for the pod the container is running in.
+	PodID string
+
+	// Zone is the Compute Engine zone in which the VM is running.
+	Zone string
+}
+
+// MonitoredResource returns the resource type and resource labels for GKEContainer.
+func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"project_id":     gke.ProjectID,
+		"instance_id":    gke.InstanceID,
+		"zone":           gke.Zone,
+		"cluster_name":   gke.ClusterName,
+		"container_name": gke.ContainerName,
+		"namespace_id":   gke.NamespaceID,
+		"pod_id":         gke.PodID,
+	}
+	return "gke_container", labels
+}
+
+// GCEInstance represents gce_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gce_instance
+type GCEInstance struct {
+
+	// ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+	ProjectID string
+
+	// InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+	InstanceID string
+
+	// Zone is the Compute Engine zone in which the VM is running.
+	Zone string
+}
+
+// MonitoredResource returns the resource type and resource labels for GCEInstance.
+func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"project_id":  gce.ProjectID,
+		"instance_id": gce.InstanceID,
+		"zone":        gce.Zone,
+	}
+	return "gce_instance", labels
+}
+
+// AWSEC2Instance represents aws_ec2_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance
+type AWSEC2Instance struct {
+
+	// AWSAccount is the AWS account number for the VM.
+	AWSAccount string
+
+	// InstanceID is the ID of the EC2 instance.
+	InstanceID string
+
+	// Region is the AWS region for the VM. The format of this field is "aws:{region}",
+	// where supported values for {region} are listed at
+	// http://docs.aws.amazon.com/general/latest/gr/rande.html.
+	Region string
+}
+
+// MonitoredResource returns the resource type and resource labels for AWSEC2Instance.
+func (aws *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		"aws_account": aws.AWSAccount,
+		"instance_id": aws.InstanceID,
+		"region":      aws.Region,
+	}
+	return "aws_ec2_instance", labels
+}
+
+// Autodetect automatically detects monitored resources based on
+// the environment where the application is running.
+// It supports detection of the following resource types:
+//  1. gke_container
+//  2. gce_instance
+//  3. aws_ec2_instance
+//
+// Returns an Interface, whose MonitoredResource method reports the detected
+// resource type and labels, or nil if no supported environment is detected.
+// For resource definitions, see https://cloud.google.com/monitoring/api/resources
+func Autodetect() Interface {
+	return func() Interface {
+		var autoDetected Interface
+		var awsIdentityDoc *awsIdentityDocument
+		var gcpMetadata *gcpMetadata
+		detectOnce.Do(func() {
+
+			// First attempt to retrieve the AWS identity document and the GCP
+			// metadata, then determine the resource type. In GCP and AWS
+			// environments both lookups finish quickly; elsewhere (e.g. a local
+			// laptop) the GCP lookup takes about 2 seconds and the AWS lookup 5-6.
+			var wg sync.WaitGroup
+			wg.Add(2)
+
+			go func() {
+				defer wg.Done()
+				awsIdentityDoc = retrieveAWSIdentityDocument()
+			}()
+			go func() {
+				defer wg.Done()
+				gcpMetadata = retrieveGCPMetadata()
+			}()
+
+			wg.Wait()
+			autoDetected = detectResourceType(awsIdentityDoc, gcpMetadata)
+		})
+		return autoDetected
+	}()
+
+}
+
+// createAWSEC2InstanceMonitoredResource creates an aws_ec2_instance monitored resource.
+// awsIdentityDoc contains AWS EC2 specific attributes.
+func createAWSEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *AWSEC2Instance {
+	awsInstance := AWSEC2Instance{
+		AWSAccount: awsIdentityDoc.accountID,
+		InstanceID: awsIdentityDoc.instanceID,
+		Region:     fmt.Sprintf("aws:%s", awsIdentityDoc.region),
+	}
+	return &awsInstance
+}
+
+// createGCEInstanceMonitoredResource creates a gce_instance monitored resource.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance {
+	gceInstance := GCEInstance{
+		ProjectID:  gcpMetadata.projectID,
+		InstanceID: gcpMetadata.instanceID,
+		Zone:       gcpMetadata.zone,
+	}
+	return &gceInstance
+}
+
+// createGKEContainerMonitoredResource creates a gke_container monitored resource.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer {
+	gkeContainer := GKEContainer{
+		ProjectID:     gcpMetadata.projectID,
+		InstanceID:    gcpMetadata.instanceID,
+		Zone:          gcpMetadata.zone,
+		ContainerName: gcpMetadata.containerName,
+		ClusterName:   gcpMetadata.clusterName,
+		NamespaceID:   gcpMetadata.namespaceID,
+		PodID:         gcpMetadata.podID,
+	}
+	return &gkeContainer
+}
+
+// detectOnce is used to make sure GCP and AWS metadata detection executes only once.
+var detectOnce sync.Once
+
+// detectResourceType determines the resource type.
+// awsIdentityDoc contains AWS EC2 attributes; nil if it is not an AWS EC2 environment.
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
new file mode 100644
index 000000000..184bb1d43
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+	"strings"
+	"unicode"
+)
+
+const labelKeySizeLimit = 100
+
+// sanitize returns a string that is truncated to 100 characters if it is too
+// long, and replaces non-alphanumeric characters with underscores.
+func sanitize(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	if len(s) > labelKeySizeLimit {
+		s = s[:labelKeySizeLimit]
+	}
+	s = strings.Map(sanitizeRune, s)
+	if unicode.IsDigit(rune(s[0])) {
+		s = "key_" + s
+	}
+	if s[0] == '_' {
+		s = "key" + s
+	}
+	return s
+}
+
+// sanitizeRune converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	// Everything else turns into an underscore
+	return '_'
+}
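The sanitization rules are easiest to see on concrete inputs. Since sanitize is unexported, the sketch below re-implements the same logic standalone rather than importing it:

    package main

    import (
        "fmt"
        "strings"
        "unicode"
    )

    // sanitizeLabelKey mirrors sanitize above: truncate to 100 bytes, map
    // non-alphanumerics to '_', and prefix keys starting with a digit or '_'.
    func sanitizeLabelKey(s string) string {
        if len(s) == 0 {
            return s
        }
        if len(s) > 100 {
            s = s[:100]
        }
        s = strings.Map(func(r rune) rune {
            if unicode.IsLetter(r) || unicode.IsDigit(r) {
                return r
            }
            return '_'
        }, s)
        if unicode.IsDigit(rune(s[0])) {
            s = "key_" + s
        }
        if s[0] == '_' {
            s = "key" + s
        }
        return s
    }

    func main() {
        fmt.Println(sanitizeLabelKey("http.status/code")) // http_status_code
        fmt.Println(sanitizeLabelKey("2xx"))              // key_2xx
    }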
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
new file mode 100644
index 000000000..a4cfaf87c
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
@@ -0,0 +1,278 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stackdriver contains the OpenCensus exporters for
+// Stackdriver Monitoring and Stackdriver Tracing.
+//
+// This exporter can be used to send metrics to Stackdriver Monitoring and traces
+// to Stackdriver Trace.
+//
+// The package uses Application Default Credentials to authenticate by default.
+// See: https://developers.google.com/identity/protocols/application-default-credentials
+//
+// Alternatively, pass the authentication options in both the MonitoringClientOptions
+// and the TraceClientOptions fields of Options.
+//
+// Stackdriver Monitoring
+//
+// This exporter supports exporting OpenCensus views to Stackdriver Monitoring.
+// Each registered view becomes a metric in Stackdriver Monitoring, with the
+// tags becoming labels.
+//
+// The aggregation function determines the metric kind: LastValue aggregations
+// generate Gauge metrics and all other aggregations generate Cumulative metrics.
+//
+// In order to be able to push your stats to Stackdriver Monitoring, you must:
+//
+// 1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en
+// 2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing
+// 3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard
+// 4. Make sure you have a Premium Stackdriver account: https://cloud.google.com/monitoring/accounts/tiers
+//
+// These steps enable the API but don't require that your app is hosted on Google Cloud Platform.
+//
+// Stackdriver Trace
+//
+// This exporter supports exporting Trace Spans to Stackdriver Trace. It also
+// supports the Google "Cloud Trace" propagation format header.
+package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+	"time"
+
+	traceapi "cloud.google.com/go/trace/apiv2"
+	"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/trace"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// Options contains options for configuring the exporter.
+type Options struct {
+	// ProjectID is the identifier of the Stackdriver
+	// project the user is uploading the stats data to.
+	// If not set, this will default to your "Application Default Credentials".
+	// For details see: https://developers.google.com/accounts/docs/application-default-credentials
+	ProjectID string
+
+	// OnError is the hook to be called when there is
+	// an error uploading the stats or tracing data.
+	// If no custom hook is set, errors are logged.
+	// Optional.
+	OnError func(err error)
+
+	// MonitoringClientOptions are additional options to be passed
+	// to the underlying Stackdriver Monitoring API client.
+	// Optional.
+	MonitoringClientOptions []option.ClientOption
+
+	// TraceClientOptions are additional options to be passed
+	// to the underlying Stackdriver Trace API client.
+	// Optional.
+	TraceClientOptions []option.ClientOption
+
+	// BundleDelayThreshold determines the max amount of time
+	// the exporter can wait before uploading view data to
+	// the backend.
+	// Optional.
+	BundleDelayThreshold time.Duration
+
+	// BundleCountThreshold determines how many view data events
+	// can be buffered before batch uploading them to the backend.
+	// Optional.
+	BundleCountThreshold int
+
+	// Resource sets the MonitoredResource against which all views will be
+	// recorded by this exporter.
+	//
+	// All Stackdriver metrics created by this exporter are custom metrics,
+	// so only a limited number of MonitoredResource types are supported, see:
+	// https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+	//
+	// An important consideration when setting the Resource here is that
+	// Stackdriver Monitoring only allows a single writer per
+	// TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+	// A TimeSeries is uniquely defined by the metric type name
+	// (constructed from the view name and the MetricPrefix), the Resource field,
+	// and the set of label key/value pairs (in OpenCensus terminology: tag).
+	//
+	// If no custom Resource is set, a default MonitoredResource
+	// with type global and no resource labels will be used. If you explicitly
+	// set this field, you may also want to set custom DefaultMonitoringLabels.
+	//
+	// Deprecated: Use MonitoredResource instead.
+	Resource *monitoredrespb.MonitoredResource
+
+	// MonitoredResource sets the MonitoredResource against which all views will be
+	// recorded by this exporter.
+	//
+	// All Stackdriver metrics created by this exporter are custom metrics,
+	// so only a limited number of MonitoredResource types are supported, see:
+	// https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+	//
+	// An important consideration when setting the MonitoredResource here is that
+	// Stackdriver Monitoring only allows a single writer per
+	// TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+	// A TimeSeries is uniquely defined by the metric type name
+	// (constructed from the view name and the MetricPrefix), the MonitoredResource field,
+	// and the set of label key/value pairs (in OpenCensus terminology: tag).
+	//
+	// If no custom MonitoredResource is set AND if Resource is also not set, then
+	// a default MonitoredResource with type global and no resource labels will be used.
+	// If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels.
+	//
+	// This field replaces the Resource field; if both are set, MonitoredResource
+	// takes precedence.
+	// Optional, but encouraged.
+	MonitoredResource monitoredresource.Interface
+
+	// MetricPrefix overrides the prefix of Stackdriver metric type names.
+	// Optional. If unset, defaults to "OpenCensus".
+	MetricPrefix string
+
+	// DefaultTraceAttributes will be appended to every span that is exported to
+	// Stackdriver Trace.
+	DefaultTraceAttributes map[string]interface{}
+
+	// DefaultMonitoringLabels are labels added to every metric created by this
+	// exporter in Stackdriver Monitoring.
+	//
+	// If unset, this defaults to a single label with key "opencensus_task" and
+	// value "go-<pid>@<hostname>". This default ensures that the set of labels
+	// together with the default Resource (global) are unique to this
+	// process, as required by Stackdriver Monitoring.
+	//
+	// If you set DefaultMonitoringLabels, make sure that the Resource field
+	// together with these labels is unique to the
+	// current process. This is to ensure that there is only a single writer to
+	// each TimeSeries in Stackdriver.
+	//
+	// Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the
+	// default "opencensus_task" label. You should only do this if you know that
+	// the Resource you set uniquely identifies this Go process.
+	DefaultMonitoringLabels *Labels
+
+	// Context allows users to provide a custom context for API calls.
+	//
+	// This context will be used several times: first, to create Stackdriver
+	// trace and metric clients, and then every time a new batch of traces or
+	// stats needs to be uploaded.
+	//
+	// If unset, context.Background() will be used.
+	Context context.Context
+}
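With the options in view, end-to-end wiring of the metrics side is short. A minimal sketch, assuming Application Default Credentials are available; the project ID, view, and measure names are placeholders:

    package main

    import (
        "context"
        "log"
        "time"

        "contrib.go.opencensus.io/exporter/stackdriver"
        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
    )

    var latencyMs = stats.Float64("demo/latency", "request latency", stats.UnitMilliseconds)

    func main() {
        exporter, err := stackdriver.NewExporter(stackdriver.Options{
            ProjectID: "my-project", // placeholder; inferred from ADC when empty
        })
        if err != nil {
            log.Fatal(err)
        }
        defer exporter.Flush()

        view.RegisterExporter(exporter)
        view.SetReportingPeriod(time.Second)
        if err := view.Register(&view.View{
            Name:        "demo/latency",
            Measure:     latencyMs,
            Description: "request latency distribution",
            Aggregation: view.Distribution(0, 10, 100, 1000),
        }); err != nil {
            log.Fatal(err)
        }

        stats.Record(context.Background(), latencyMs.M(42))
        time.Sleep(2 * time.Second) // let a reporting interval elapse before exit
    }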
+
+// Exporter is a stats.Exporter and trace.Exporter
+// implementation that uploads data to Stackdriver.
+type Exporter struct {
+	traceExporter *traceExporter
+	statsExporter *statsExporter
+}
+
+// NewExporter creates a new Exporter that implements both stats.Exporter and
+// trace.Exporter.
+func NewExporter(o Options) (*Exporter, error) {
+	if o.Context == nil {
+		o.Context = context.Background()
+	}
+	if o.ProjectID == "" {
+		creds, err := google.FindDefaultCredentials(o.Context, traceapi.DefaultAuthScopes()...)
+		if err != nil {
+			return nil, fmt.Errorf("stackdriver: %v", err)
+		}
+		if creds.ProjectID == "" {
+			return nil, errors.New("stackdriver: no project found with application default credentials")
+		}
+		o.ProjectID = creds.ProjectID
+	}
+
+	if o.MonitoredResource != nil {
+		o.Resource = convertMonitoredResourceToPB(o.MonitoredResource)
+	}
+
+	se, err := newStatsExporter(o, true)
+	if err != nil {
+		return nil, err
+	}
+	te, err := newTraceExporter(o)
+	if err != nil {
+		return nil, err
+	}
+	return &Exporter{
+		statsExporter: se,
+		traceExporter: te,
+	}, nil
+}
+
+// ExportView exports to Stackdriver Monitoring if the view data
+// has one or more rows.
+func (e *Exporter) ExportView(vd *view.Data) {
+	e.statsExporter.ExportView(vd)
+}
+
+// ExportSpan exports a SpanData to Stackdriver Trace.
+func (e *Exporter) ExportSpan(sd *trace.SpanData) {
+	if len(e.traceExporter.o.DefaultTraceAttributes) > 0 {
+		sd = e.sdWithDefaultTraceAttributes(sd)
+	}
+	e.traceExporter.ExportSpan(sd)
+}
+
+func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData {
+	newSD := *sd
+	newSD.Attributes = make(map[string]interface{})
+	for k, v := range e.traceExporter.o.DefaultTraceAttributes {
+		newSD.Attributes[k] = v
+	}
+	for k, v := range sd.Attributes {
+		newSD.Attributes[k] = v
+	}
+	return &newSD
+}
+
+// Flush waits for exported data to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose recent stats or spans.
+func (e *Exporter) Flush() {
+	e.statsExporter.Flush()
+	e.traceExporter.Flush()
+}
+
+func (o Options) handleError(err error) {
+	if o.OnError != nil {
+		o.OnError(err)
+		return
+	}
+	log.Printf("Failed to export to Stackdriver: %v", err)
+}
+
+// convertMonitoredResourceToPB converts MonitoredResource data into a
+// protocol buffer.
+func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource {
+	mrpb := new(monitoredrespb.MonitoredResource)
+	var labels map[string]string
+	mrpb.Type, labels = mr.MonitoredResource()
+	mrpb.Labels = make(map[string]string)
+	for k, v := range labels {
+		mrpb.Labels[k] = v
+	}
+	return mrpb
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
new file mode 100644
index 000000000..9014aec3d
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
@@ -0,0 +1,471 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"go.opencensus.io"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+	"go.opencensus.io/trace"
+
+	"cloud.google.com/go/monitoring/apiv3"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"google.golang.org/api/option"
+	"google.golang.org/api/support/bundler"
+	distributionpb "google.golang.org/genproto/googleapis/api/distribution"
+	labelpb "google.golang.org/genproto/googleapis/api/label"
+	"google.golang.org/genproto/googleapis/api/metric"
+	metricpb "google.golang.org/genproto/googleapis/api/metric"
+	monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+)
+
+const (
+	maxTimeSeriesPerUpload    = 200
+	opencensusTaskKey         = "opencensus_task"
+	opencensusTaskDescription = "Opencensus task identifier"
+	defaultDisplayNamePrefix  = "OpenCensus"
+	version                   = "0.6.0"
+)
+
+var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version)
+
+// statsExporter exports stats to Stackdriver Monitoring.
+type statsExporter struct {
+	bundler *bundler.Bundler
+	o       Options
+
+	createdViewsMu sync.Mutex
+	createdViews   map[string]*metricpb.MetricDescriptor // Views already created remotely
+
+	c             *monitoring.MetricClient
+	defaultLabels map[string]labelValue
+}
+
+var (
+	errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring.
+// Only one Stackdriver exporter should be created per ProjectID per process; any subsequent
+// invocations of NewExporter with the same ProjectID will return an error.
+func newStatsExporter(o Options, enforceProjectUniqueness bool) (*statsExporter, error) {
+	if strings.TrimSpace(o.ProjectID) == "" {
+		return nil, errBlankProjectID
+	}
+
+	opts := append(o.MonitoringClientOptions, option.WithUserAgent(userAgent))
+	client, err := monitoring.NewMetricClient(o.Context, opts...)
+	if err != nil {
+		return nil, err
+	}
+	e := &statsExporter{
+		c:            client,
+		o:            o,
+		createdViews: make(map[string]*metricpb.MetricDescriptor),
+	}
+	if o.DefaultMonitoringLabels != nil {
+		e.defaultLabels = o.DefaultMonitoringLabels.m
+	} else {
+		e.defaultLabels = map[string]labelValue{
+			opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
+		}
+	}
+	e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
+		vds := bundle.([]*view.Data)
+		e.handleUpload(vds...)
+	})
+	e.bundler.DelayThreshold = e.o.BundleDelayThreshold
+	e.bundler.BundleCountThreshold = e.o.BundleCountThreshold
+	return e, nil
+}
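The default "opencensus_task" label value has the shape "go-<pid>@<hostname>", as built by getTaskValue below; a standalone sketch of the same construction:

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func main() {
        hostname, err := os.Hostname()
        if err != nil {
            hostname = "localhost"
        }
        // e.g. "go-12345@my-host"
        fmt.Println("go-" + strconv.Itoa(os.Getpid()) + "@" + hostname)
    }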
+// ExportView exports to Stackdriver Monitoring if the view data
+// has one or more rows.
+func (e *statsExporter) ExportView(vd *view.Data) {
+	if len(vd.Rows) == 0 {
+		return
+	}
+	err := e.bundler.Add(vd, 1)
+	switch err {
+	case nil:
+		return
+	case bundler.ErrOversizedItem:
+		go e.handleUpload(vd)
+	case bundler.ErrOverflow:
+		e.o.handleError(errors.New("failed to upload: buffer full"))
+	default:
+		e.o.handleError(err)
+	}
+}
+
+// getTaskValue returns a task label value in the format of
+// "go-<pid>@<hostname>".
+func getTaskValue() string {
+	hostname, err := os.Hostname()
+	if err != nil {
+		hostname = "localhost"
+	}
+	return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname
+}
+
+// handleUpload uploads a slice of view Data and reports any error to the
+// configured error handler.
+func (e *statsExporter) handleUpload(vds ...*view.Data) {
+	if err := e.uploadStats(vds); err != nil {
+		e.o.handleError(err)
+	}
+}
+
+// Flush waits for exported view data to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose recent stats.
+func (e *statsExporter) Flush() {
+	e.bundler.Flush()
+}
+
+func (e *statsExporter) uploadStats(vds []*view.Data) error {
+	ctx, span := trace.StartSpan(
+		e.o.Context,
+		"contrib.go.opencensus.io/exporter/stackdriver.uploadStats",
+		trace.WithSampler(trace.NeverSample()),
+	)
+	defer span.End()
+
+	for _, vd := range vds {
+		if err := e.createMeasure(ctx, vd.View); err != nil {
+			span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+			return err
+		}
+	}
+	for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
+		if err := createTimeSeries(ctx, e.c, req); err != nil {
+			span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+			// TODO(jbd): Don't fail fast here, batch errors?
+			return err
+		}
+	}
+	return nil
+}
+
+func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
+	var reqs []*monitoringpb.CreateTimeSeriesRequest
+	var timeSeries []*monitoringpb.TimeSeries
+
+	resource := e.o.Resource
+	if resource == nil {
+		resource = &monitoredrespb.MonitoredResource{
+			Type: "global",
+		}
+	}
+
+	for _, vd := range vds {
+		for _, row := range vd.Rows {
+			ts := &monitoringpb.TimeSeries{
+				Metric: &metricpb.Metric{
+					Type:   namespacedViewName(vd.View.Name),
+					Labels: newLabels(e.defaultLabels, row.Tags),
+				},
+				Resource: resource,
+				Points:   []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)},
+			}
+			timeSeries = append(timeSeries, ts)
+			if len(timeSeries) == limit {
+				reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
+					Name:       monitoring.MetricProjectPath(e.o.ProjectID),
+					TimeSeries: timeSeries,
+				})
+				timeSeries = []*monitoringpb.TimeSeries{}
+			}
+		}
+	}
+	if len(timeSeries) > 0 {
+		reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{
+			Name:       monitoring.MetricProjectPath(e.o.ProjectID),
+			TimeSeries: timeSeries,
+		})
+	}
+	return reqs
+}
+
+// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
+// An error will be returned if a metric descriptor with the same name already exists
+// but has a different aggregation or set of keys.
+func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error { + e.createdViewsMu.Lock() + defer e.createdViewsMu.Unlock() + + m := v.Measure + agg := v.Aggregation + tagKeys := v.TagKeys + viewName := v.Name + + if md, ok := e.createdViews[viewName]; ok { + return e.equalMeasureAggTagKeys(md, m, agg, tagKeys) + } + + metricType := namespacedViewName(viewName) + var valueType metricpb.MetricDescriptor_ValueType + unit := m.Unit() + // Default metric Kind + metricKind := metricpb.MetricDescriptor_CUMULATIVE + + switch agg.Type { + case view.AggTypeCount: + valueType = metricpb.MetricDescriptor_INT64 + // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", + // because this view does not apply to the recorded values. + unit = stats.UnitDimensionless + case view.AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + case view.AggTypeDistribution: + valueType = metricpb.MetricDescriptor_DISTRIBUTION + case view.AggTypeLastValue: + metricKind = metricpb.MetricDescriptor_GAUGE + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + default: + return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) + } + + displayNamePrefix := defaultDisplayNamePrefix + if e.o.MetricPrefix != "" { + displayNamePrefix = e.o.MetricPrefix + } + + md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + MetricDescriptor: &metricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), + DisplayName: path.Join(displayNamePrefix, viewName), + Description: v.Description, + Unit: unit, + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys), + }, + }) + if err != nil { + return err + } + + e.createdViews[viewName] = md + return nil +} + +func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + switch v.Aggregation.Type { + case view.AggTypeLastValue: + return newGaugePoint(v, row, end) + default: + return newCumulativePoint(v, row, start, end) + } +} + +func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: newTypedValue(v, row), + } +} + +func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point { + gaugeTime := ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + } + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: gaugeTime, + }, + Value: newTypedValue(v, row), + } +} + +func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { + switch v := r.Data.(type) { + case *view.CountData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v.Value, + }} + case *view.SumData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + 
Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + case *view.DistributionData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: v.Count, + Mean: v.Mean, + SumOfSquaredDeviation: v.SumOfSquaredDev, + // TODO(songya): uncomment this once Stackdriver supports min/max. + // Range: &distributionpb.Distribution_Range{ + // Min: v.Min, + // Max: v.Max, + // }, + BucketOptions: &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + Bounds: vd.Aggregation.Buckets, + }, + }, + }, + BucketCounts: v.CountPerBucket, + }, + }} + case *view.LastValueData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + } + return nil +} + +func namespacedViewName(v string) string { + return path.Join("custom.googleapis.com", "opencensus", v) +} + +func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string { + labels := make(map[string]string) + for k, lbl := range defaults { + labels[sanitize(k)] = lbl.val + } + for _, tag := range tags { + labels[sanitize(tag.Key.Name())] = tag.Value + } + return labels +} + +func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults)) + for key, lbl := range defaults { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key), + Description: lbl.desc, + ValueType: labelpb.LabelDescriptor_STRING, + }) + } + for _, key := range keys { + labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{ + Key: sanitize(key.Name()), + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + }) + } + return labelDescriptors +} + +func (e *statsExporter) equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error { + var aggTypeMatch bool + switch md.ValueType { + case metricpb.MetricDescriptor_INT64: + if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) { + return fmt.Errorf("stackdriver metric descriptor was not created as int64") + } + aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DOUBLE: + if _, ok := m.(*stats.Float64Measure); !ok { + return fmt.Errorf("stackdriver metric descriptor was not created as double") + } + aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DISTRIBUTION: + aggTypeMatch = agg.Type == view.AggTypeDistribution + } + + if !aggTypeMatch { + return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) + } + + labels := make(map[string]struct{}, len(keys)+len(e.defaultLabels)) + for _, k := range keys { + labels[sanitize(k.Name())] = struct{}{} + } + for k := range e.defaultLabels { + labels[sanitize(k)] = struct{}{} + } + + for _, k := range md.Labels { + if _, ok := labels[k.Key]; 
!ok { + return fmt.Errorf("stackdriver metric descriptor %q was not created with label %q", md.Type, k) + } + delete(labels, k.Key) + } + + if len(labels) > 0 { + extra := make([]string, 0, len(labels)) + for k := range labels { + extra = append(extra, k) + } + return fmt.Errorf("stackdriver metric descriptor %q contains unexpected labels: %s", md.Type, strings.Join(extra, ", ")) + } + + return nil +} + +var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.CreateMetricDescriptor(ctx, mdr) +} + +var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.GetMetricDescriptor(ctx, mdr) +} + +var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error { + return c.CreateTimeSeries(ctx, ts) +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go new file mode 100644 index 000000000..135a64c67 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go @@ -0,0 +1,171 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "fmt" + "log" + "sync" + "time" + + tracingclient "cloud.google.com/go/trace/apiv2" + "go.opencensus.io/trace" + "google.golang.org/api/support/bundler" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +// traceExporter is an implementation of trace.Exporter that uploads spans to +// Stackdriver. +// +type traceExporter struct { + o Options + projectID string + bundler *bundler.Bundler + // uploadFn defaults to uploadSpans; it can be replaced for tests. + uploadFn func(spans []*trace.SpanData) + overflowLogger + client *tracingclient.Client +} + +var _ trace.Exporter = (*traceExporter)(nil) + +func newTraceExporter(o Options) (*traceExporter, error) { + client, err := tracingclient.NewClient(o.Context, o.TraceClientOptions...) + if err != nil { + return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) + } + return newTraceExporterWithClient(o, client), nil +} + +func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { + e := &traceExporter{ + projectID: o.ProjectID, + client: c, + o: o, + } + bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) { + e.uploadFn(bundle.([]*trace.SpanData)) + }) + if o.BundleDelayThreshold > 0 { + bundler.DelayThreshold = o.BundleDelayThreshold + } else { + bundler.DelayThreshold = 2 * time.Second + } + if o.BundleCountThreshold > 0 { + bundler.BundleCountThreshold = o.BundleCountThreshold + } else { + bundler.BundleCountThreshold = 50 + } + // The measured "bytes" are not really bytes, see exportReceiver. 
+ bundler.BundleByteThreshold = bundler.BundleCountThreshold * 200 + bundler.BundleByteLimit = bundler.BundleCountThreshold * 1000 + bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000 + + e.bundler = bundler + e.uploadFn = e.uploadSpans + return e +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *traceExporter) ExportSpan(s *trace.SpanData) { + // n is a length heuristic. + n := 1 + n += len(s.Attributes) + n += len(s.Annotations) + n += len(s.MessageEvents) + err := e.bundler.Add(s, n) + switch err { + case nil: + return + case bundler.ErrOversizedItem: + go e.uploadFn([]*trace.SpanData{s}) + case bundler.ErrOverflow: + e.overflowLogger.log() + default: + e.o.handleError(err) + } +} + +// Flush waits for exported trace spans to be uploaded. +// +// This is useful if your program is ending and you do not want to lose recent +// spans. +func (e *traceExporter) Flush() { + e.bundler.Flush() +} + +// uploadSpans uploads a set of spans to Stackdriver. +func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { + req := tracepb.BatchWriteSpansRequest{ + Name: "projects/" + e.projectID, + Spans: make([]*tracepb.Span, 0, len(spans)), + } + for _, span := range spans { + req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID, e.o.Resource)) + } + // Create a never-sampled span to prevent traces associated with exporter. + ctx, span := trace.StartSpan( // TODO: add timeouts + e.o.Context, + "contrib.go.opencensus.io/exporter/stackdriver.uploadSpans", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + + err := e.client.BatchWriteSpans(ctx, &req) + if err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + e.o.handleError(err) + } +} + +// overflowLogger ensures that at most one overflow error log message is +// written every 5 seconds. +type overflowLogger struct { + mu sync.Mutex + pause bool + accum int +} + +func (o *overflowLogger) delay() { + o.pause = true + time.AfterFunc(5*time.Second, func() { + o.mu.Lock() + defer o.mu.Unlock() + switch { + case o.accum == 0: + o.pause = false + case o.accum == 1: + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.accum = 0 + o.delay() + default: + log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) + o.accum = 0 + o.delay() + } + }) +} + +func (o *overflowLogger) log() { + o.mu.Lock() + defer o.mu.Unlock() + if !o.pause { + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.delay() + } else { + o.accum++ + } +} diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go new file mode 100644 index 000000000..9e8ead863 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go @@ -0,0 +1,277 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "fmt" + "math" + "time" + "unicode/utf8" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 + maxAttributeStringValue = 256 + agentLabel = "g.co/agent" + + labelHTTPHost = `/http/host` + labelHTTPMethod = `/http/method` + labelHTTPStatusCode = `/http/status_code` + labelHTTPPath = `/http/path` + labelHTTPUserAgent = `/http/user_agent` +) + +// proto returns a protocol buffer representation of a SpanData. +func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource) *tracepb.Span { + if s == nil { + return nil + } + + traceIDString := s.SpanContext.TraceID.String() + spanIDString := s.SpanContext.SpanID.String() + + name := s.Name + switch s.SpanKind { + case trace.SpanKindClient: + name = "Sent." + name + case trace.SpanKindServer: + name = "Recv." + name + } + + sp := &tracepb.Span{ + Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, + SpanId: spanIDString, + DisplayName: trunc(name, 128), + StartTime: timestampProto(s.StartTime), + EndTime: timestampProto(s.EndTime), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, + } + if p := s.ParentSpanID; p != (trace.SpanID{}) { + sp.ParentSpanId = p.String() + } + if s.Status.Code != 0 || s.Status.Message != "" { + sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} + } + + var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int + copyAttributes(&sp.Attributes, s.Attributes) + + // Copy MonitoredResources as span Attributes + sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr) + + as := s.Annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} + copyAttributes(&annotation.Attributes, a.Attributes) + event := &tracepb.Span_TimeEvent{ + Time: timestampProto(a.Time), + Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, + } + annotations++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) + } + + if sp.Attributes == nil { + sp.Attributes = &tracepb.Span_Attributes{ + AttributeMap: make(map[string]*tracepb.AttributeValue), + } + } + sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: trunc(userAgent, maxAttributeStringValue), + }, + } + + es := s.MessageEvents + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ + Time: timestampProto(e.Time), + Value: &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: 
&tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: e.MessageID, + UncompressedSizeBytes: e.UncompressedByteSize, + CompressedSizeBytes: e.CompressedByteSize, + }, + }, + }) + } + + if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + } + + if len(s.Links) > 0 { + sp.Links = &tracepb.Span_Links{} + sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) + for _, l := range s.Links { + link := &tracepb.Span_Link{ + TraceId: l.TraceID.String(), + SpanId: l.SpanID.String(), + Type: tracepb.Span_Link_Type(l.Type), + } + copyAttributes(&link.Attributes, l.Attributes) + sp.Links.Link = append(sp.Links.Link, link) + } + } + return sp +} + +// timestampProto creates a timestamp proto for a time.Time. +func timestampProto(t time.Time) *timestamppb.Timestamp { + return ×tamppb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// copyMonitoredResourceAttributes copies proto monitoredResource to proto map field (Span_Attributes) +// it creates the map if it is nil. +func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes { + if mr == nil { + return out + } + if out == nil { + out = &tracepb.Span_Attributes{} + } + if out.AttributeMap == nil { + out.AttributeMap = make(map[string]*tracepb.AttributeValue) + } + for k, v := range mr.Labels { + av := attributeValue(v) + out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av + } + return out +} + +// copyAttributes copies a map of attributes to a proto map field. +// It creates the map if it is nil. +func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { + if len(in) == 0 { + return + } + if *out == nil { + *out = &tracepb.Span_Attributes{} + } + if (*out).AttributeMap == nil { + (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) + } + var dropped int32 + for key, value := range in { + av := attributeValue(value) + if av == nil { + continue + } + switch key { + case ochttp.PathAttribute: + (*out).AttributeMap[labelHTTPPath] = av + case ochttp.HostAttribute: + (*out).AttributeMap[labelHTTPHost] = av + case ochttp.MethodAttribute: + (*out).AttributeMap[labelHTTPMethod] = av + case ochttp.UserAgentAttribute: + (*out).AttributeMap[labelHTTPUserAgent] = av + case ochttp.StatusCodeAttribute: + (*out).AttributeMap[labelHTTPStatusCode] = av + default: + if len(key) > 128 { + dropped++ + continue + } + (*out).AttributeMap[key] = av + } + } + (*out).DroppedAttributesCount = dropped +} + +func attributeValue(v interface{}) *tracepb.AttributeValue { + switch value := v.(type) { + case bool: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, + } + case int64: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: value}, + } + case string: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, + } + } + return nil +} + +// trunc returns a TruncatableString truncated to the given limit. 
+func trunc(s string, limit int) *tracepb.TruncatableString { + if len(s) > limit { + b := []byte(s[:limit]) + for { + r, size := utf8.DecodeLastRune(b) + if r == utf8.RuneError && size == 1 { + b = b[:len(b)-1] + } else { + break + } + } + return &tracepb.TruncatableString{ + Value: string(b), + TruncatedByteCount: clip32(len(s) - len(b)), + } + } + return &tracepb.TruncatableString{ + Value: s, + TruncatedByteCount: 0, + } +} + +// clip32 clips an int to the range of an int32. +func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..5f14d1162 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..56fdfc2bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. 
+ Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..0202a008f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. 
+func SprintError(code, message, extra string, origErr error) string {
+	msg := fmt.Sprintf("%s: %s", code, message)
+	if extra != "" {
+		msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+	}
+	if origErr != nil {
+		msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+	}
+	return msg
+}
+
+// A baseError wraps the code and message which define an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error interface,
+// and for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original errors this error is based on. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs are the error objects which will be nested under the new error to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no errors were set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// the request status code and service requestID.
+//
+// Should be used to wrap all requests which involve service calls, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
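+//
+// As a rough sketch (values invented), a service call site might build:
+//
+//	err := New("Throttling", "rate exceeded", nil)
+//	reqErr := newRequestError(err, 429, "req-1234")
+//	// reqErr.StatusCode() == 429, reqErr.RequestID() == "req-1234"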
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error.
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID.
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no errors were set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+// errorList is a list of errors which satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// Only build a message if the list is non-empty.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// If the next index is still within the slice, append a
+			// newline. A trailing '\n' is avoided because it would
+			// break unit tests that compare the message exactly.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 000000000..1a3d106d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+	"time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different types, but will only copy fields
+// which are assignable and exist in both structs. Fields which are not
+// assignable, or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
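+//
+// As a hedged illustration of the copy semantics rcopy implements (types
+// and values invented for this example):
+//
+//	type Input struct{ Name *string }
+//	src := &Input{Name: aws.String("orig")}
+//	var dst Input
+//	awsutil.Copy(&dst, src)
+//	// dst.Name is a fresh *string; mutating *dst.Name leaves
+//	// *src.Name untouched.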
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				if _, ok := src.Interface().(*time.Time); !ok {
+					dst.Set(reflect.New(e))
+				} else {
+					tempValue := reflect.New(e)
+					tempValue.Elem().Set(src.Elem())
+					// Sets time.Time's unexported values
+					dst.Set(tempValue)
+				}
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If it's not assignable, the value
+		// would need to be converted, and the impact of that may be
+		// unexpected or incompatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 000000000..59fa4a558
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this method will dereference the input
+// values if possible, so the comparison will not fail merely because one
+// parameter is a pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil and of the same type, they are equal.
+		// If they are of different types, they are not equal.
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal.
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 000000000..11c52c389
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively, so all nested values are collected.
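+//
+// The path grammar, informally (all examples illustrative):
+//
+//	"Struct.Field"   a nested struct member
+//	"List[0].Name"   index into a slice, then a member
+//	"List[-1]"       a negative index counts from the end
+//	"List[].Name"    every element of a slice
+//	"*"              every exported member of a struct
+//	"A.B || C.D"     the first path yielding values wins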
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
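+//
+// ValuesAtPath delegates to the jmespath library, so the path uses JMESPath
+// syntax; a small sketch (type and path invented for this example):
+//
+//	type Out struct{ Tags []struct{ Key string } }
+//	vals, err := awsutil.ValuesAtPath(out, "Tags[0].Key")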
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..710eb432f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
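+//
+// A hedged sketch of the output shape Prettify produces (struct and value
+// invented for this example):
+//
+//	type User struct{ Name *string }
+//	fmt.Println(awsutil.Prettify(User{Name: aws.String("ann")}))
+//
+//	// {
+//	//   Name: "ann"
+//	// }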
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..b6432f1a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 000000000..212fe25e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,96 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signin name can be overriden based on metadata the + // service has. + SigningNameDerived bool +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
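+//
+// Service client constructors are the intended callers; a rough sketch of
+// direct use (service name invented, cfg being a client.Config obtained
+// from a ConfigProvider):
+//
+//	c := client.New(*cfg.Config, metadata.ClientInfo{
+//		ServiceName: "myservice",
+//		Endpoint:    cfg.Endpoint,
+//	}, cfg.Handlers)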
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 000000000..a397b0d04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,116 @@
+package client
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//	type retryer struct {
+//		client.DefaultRetryer
+//	}
+//
+//	// This implementation always has 100 max retries
+//	func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again.
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		if delay, ok := getRetryDelay(r); ok {
+			return delay
+		}
+
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if throttle && retryCount > 8 {
+		retryCount = 8
+	} else if retryCount > 13 {
+		retryCount = 13
+	}
+
+	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
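+//
+// Concretely, based on the logic below: an explicit r.Retryable flag wins;
+// otherwise any 5xx status other than 501, a retryable error code, or a
+// throttle status (429, 502, 503, 504) triggers a retry.
+//
+// For the backoff computed in RetryRules above, a worked (illustrative)
+// case: retryCount=2 without throttling gives
+// delay = (1<<2) * (rand[0,30)+30) ms, i.e. between 120ms and 236ms.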
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() + } + + return true +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..ce9fb896d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,184 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. 
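+//
+// AddDebugHandlers installs this handler when the config's LogLevel is at
+// least LogDebug. To capture request bodies as well, a config along these
+// lines (illustrative) can be used:
+//
+//	cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)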
+var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
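+//
+// Neither header-only handler is installed by AddDebugHandlers; as a
+// sketch, a caller could attach one manually:
+//
+//	svc.Handlers.Send.PushBackNamed(client.LogHTTPResponseHeaderHandler)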
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..920e9fddf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,13 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 000000000..58c89fa0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,492 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. 
If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. 
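	//
	// For example (illustrative):
	//
	//	sess := session.Must(session.NewSession(aws.NewConfig().
	//		WithS3Disable100Continue(true)))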
+ S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. 
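+//
+// Path-style addressing is often paired with a custom endpoint, for
+// example (endpoint value invented):
+//
+//	cfg := aws.NewConfig().
+//		WithEndpoint("http://localhost:9000").
+//		WithS3ForcePathStyle(true)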
+func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = 
other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go new file mode 100644 index 000000000..79f426853 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -0,0 +1,71 @@ +package aws + +import ( + "time" +) + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. 
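+//
+// A minimal sketch of its use:
+//
+//	if err := aws.SleepWithContext(ctx, 500*time.Millisecond); err != nil {
+//		return err // the context was canceled before the delay elapsed
+//	}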
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 000000000..8fdda5303 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 000000000..064f75c92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..ff5d58e06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,387 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
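+//
+// For example: aws.BoolValue(aws.Bool(true)) == true, and
+// aws.BoolValue(nil) == false.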
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+	dst := make([]*int64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+	dst := make([]int64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+	dst := make(map[string]*int64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+	dst := make(map[string]int64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..cfcddf3dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,228 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body's length and no "Content-Length" was specified,
+// a serialization error will be set on the request.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 10 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(10 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. 
+ if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. 
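+//
+// For illustration, a hedged sketch of how a handler like this might be
+// replaced on a handler list (assuming sess is a *session.Session, and that
+// the HandlerList SwapNamed method is available in this SDK vintage):
+//
+//	custom := request.NamedHandler{
+//		Name: "core.ValidateEndpointHandler",
+//		Fn: func(r *request.Request) {
+//			// custom endpoint/region validation goes here
+//		},
+//	}
+//	sess.Handlers.Validate.SwapNamed(custom) // swaps by matching name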
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..a15f496bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec_env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..f298d6596 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,102 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. 
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvProvider{},
+//			&ec2rolecreds.EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: creds,
+//	})))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 000000000..a270844df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,259 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := credentials.NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := credentials.NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials
+
+import (
+	"sync"
+	"time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: credentials.AnonymousCredentials,
+//	})))
+//	// Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or is empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the Value returned by Retrieve
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
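+//
+// For illustration, a hedged sketch of stubbing credentials in a test with a
+// fixed failure (the error code and provider name are examples only):
+//
+//	creds := NewCredentials(ErrorProvider{
+//		Err:          awserr.New("TestError", "always fails", nil),
+//		ProviderName: "TestProvider",
+//	})
+//	_, err := creds.Get() // always returns the stubbed error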
+func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + + m sync.RWMutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). 
+func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..0ed791be6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. 
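+//
+// For illustration, a hedged sketch (sess is assumed to be a *session.Session):
+//
+//	metaClient := ec2metadata.New(sess)
+//	creds := ec2rolecreds.NewCredentialsWithClient(metaClient,
+//		func(p *ec2rolecreds.EC2RoleProvider) {
+//			p.ExpiryWindow = 5 * time.Minute // refresh slightly early
+//		})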
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..ace513138 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,198 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. 
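+//
+// For illustration, a hedged sketch (the endpoint URL is an example only;
+// sess is assumed to be a *session.Session):
+//
+//	p := endpointcreds.NewProviderClient(*sess.Config, sess.Handlers,
+//		"http://127.0.0.1:8080/latest/credentials",
+//		func(p *endpointcreds.Provider) {
+//			p.ExpiryWindow = 30 * time.Second
+//		})
+//	creds := credentials.NewCredentials(p)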
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+	p := &Provider{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: "CredentialsEndpoint",
+				Endpoint:    endpoint,
+			},
+			handlers,
+		),
+	}
+
+	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+	p.Client.Handlers.Validate.Clear()
+	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint only when they are needed or have expired.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	resp, err := p.getCredentials()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+	if authToken := p.AuthorizationToken; len(authToken) != 0 {
+		req.HTTPRequest.Header.Set("Authorization", authToken)
+	}
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+		// Return early so the serialization error above is not clobbered
+		// by the generic error built below.
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..c14231a16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,78 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..51e21e0f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. 
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. 
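+//
+// The lookup order implemented below is, in effect:
+//	1. the Filename field, if set
+//	2. the AWS_SHARED_CREDENTIALS_FILE environment variable, if non-empty
+//	3. the platform default, e.g. "$HOME/.aws/credentials"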
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; a failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..4f5dab3fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..4108e433e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,298 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the role's
+credentials need to be refreshed. Specifying the TokenCode should be used
+for short-lived operations that will not need to be refreshed, and when you do
+not want to have direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. 
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ roleOutput, err := p.Client.AssumeRole(input)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 000000000..152d785b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via a UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op, meaning no metrics will be published.
+//
+// Example:
+// r, err := csm.Start("clientID", ":31000")
+// if err != nil {
+// panic(fmt.Errorf("failed starting CSM: %v", err))
+// }
+//
+// sess, err := session.NewSession(&aws.Config{})
+// if err != nil {
+// panic(fmt.Errorf("failed loading session: %v", err))
+// }
+//
+// r.InjectHandlers(&sess.Handlers)
+//
+// client := s3.New(sess)
+// resp, err := client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Will pause monitoring
+// r.Pause()
+// resp, err = client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Resume monitoring
+// r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+// r := csm.Get()
+// r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..2f0c6eac9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+ "fmt"
+ "sync"
+)
+
+var (
+ lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+ APICallMetricHandlerName = "awscsm.SendAPICallMetric"
+ APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once, and will panic if a different
+// client ID or port is passed in.
+//
+// Example:
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+// sess := session.Must(session.NewSession())
+// r.InjectHandlers(&sess.Handlers)
+//
+// svc := s3.New(sess)
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
func Start(clientID string, url string) (*Reporter, error) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if sender == nil {
+ sender = newReporter(clientID, url)
+ } else {
+ if sender.clientID != clientID {
+ panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+ }
+
+ if sender.url != url {
+ panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+ }
+ }
+
+ if err := connect(url); err != nil {
+ sender = nil
+ return nil, err
+ }
+
+ return sender, nil
+}
+
+// Get will return a Reporter if one exists; if one does not exist,
+// nil will be returned.
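+//
+// A minimal usage sketch (this assumes Start was called earlier in the
+// process; otherwise Get returns nil):
+//
+// if r := csm.Get(); r != nil {
+//     r.Pause()
+// }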
+func Get() *Reporter {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return sender
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 000000000..6f57024d7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,53 @@
+package csm
+
+import (
+ "strconv"
+ "time"
+)
+
+type metricTime time.Time
+
+func (t metricTime) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(time.Time(t).UnixNano())
+ return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
+
+type metric struct {
+ ClientID *string `json:"ClientId,omitempty"`
+ API *string `json:"Api,omitempty"`
+ Service *string `json:"Service,omitempty"`
+ Timestamp *metricTime `json:"Timestamp,omitempty"`
+ Type *string `json:"Type,omitempty"`
+ Version *int `json:"Version,omitempty"`
+
+ AttemptCount *int `json:"AttemptCount,omitempty"`
+ Latency *int `json:"Latency,omitempty"`
+
+ Fqdn *string `json:"Fqdn,omitempty"`
+ UserAgent *string `json:"UserAgent,omitempty"`
+ AttemptLatency *int `json:"AttemptLatency,omitempty"`
+
+ SessionToken *string `json:"SessionToken,omitempty"`
+ Region *string `json:"Region,omitempty"`
+ AccessKey *string `json:"AccessKey,omitempty"`
+ HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
+ XAmzID2 *string `json:"XAmzId2,omitempty"`
+ XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
+
+ AWSException *string `json:"AwsException,omitempty"`
+ AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+ SDKException *string `json:"SdkException,omitempty"`
+ SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+ DestinationIP *string `json:"DestinationIp,omitempty"`
+ ConnectionReused *int `json:"ConnectionReused,omitempty"`
+
+ AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+ ConnectLatency *int `json:"ConnectLatency,omitempty"`
+ RequestLatency *int `json:"RequestLatency,omitempty"`
+ DNSLatency *int `json:"DnsLatency,omitempty"`
+ TCPLatency *int `json:"TcpLatency,omitempty"`
+ SSLLatency *int `json:"SslLatency,omitempty"`
+
+ MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 000000000..514fc3739
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,54 @@
+package csm
+
+import (
+ "sync/atomic"
+)
+
+const (
+ runningEnum = iota
+ pausedEnum
+)
+
+var (
+ // MetricsChannelSize is the number of metrics to hold in the channel.
+ MetricsChannelSize = 100
+)
+
+type metricChan struct {
+ ch chan metric
+ paused int64
+}
+
+func newMetricChan(size int) metricChan {
+ return metricChan{
+ ch: make(chan metric, size),
+ }
+}
+
+func (ch *metricChan) Pause() {
+ atomic.StoreInt64(&ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+ atomic.StoreInt64(&ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+ v := atomic.LoadInt64(&ch.paused)
+ return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused.
+func (ch *metricChan) Push(m metric) bool {
+ if ch.IsPaused() {
+ return false
+ }
+
+ select {
+ case ch.ch <- m:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 000000000..118618442
--- /dev/null
+++ 
b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,242 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + setError(&m, awserr) + } + } + + rep.metricsCh.Push(m) +} + +func setError(m *metric, err awserr.Error) { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + m.SDKException = &code + m.SDKExceptionMessage = &msg + default: + m.AWSException = &code + m.AWSExceptionMessage = &msg + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+ b, err := json.Marshal(m)
+ if err != nil {
+ continue
+ }
+
+ rep.conn.Write(b)
+ }
+ }
+}
+
+// Pause will pause the metric channel preventing any new metrics from
+// being added.
+func (rep *Reporter) Pause() {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if rep == nil {
+ return
+ }
+
+ rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring
+// to be resumed.
+func (rep *Reporter) Continue() {
+ lock.Lock()
+ defer lock.Unlock()
+ if rep == nil {
+ return
+ }
+
+ if !rep.metricsCh.IsPaused() {
+ return
+ }
+
+ rep.metricsCh.Continue()
+}
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// Example:
+// // Start must be called in order to inject the correct handlers
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+//
+// sess := session.Must(session.NewSession())
+// r.InjectHandlers(&sess.Handlers)
+//
+// // create a new service client with our client side metric session
+// svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+ if rep == nil {
+ return
+ }
+
+ apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
+ apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
+
+ handlers.Complete.PushFrontNamed(apiCallHandler)
+ handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
+
+ handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+ if b {
+ return 1
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..6cd84cd96
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
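+//
+// A minimal sketch of resetting a config to these defaults and attaching
+// the default credential chain (all names as defined in this package):
+//
+// cfg := defaults.Config()
+// cfg.Credentials = defaults.CredChain(cfg, defaults.Handlers())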
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: CredProviders(cfg, handlers),
+ })
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, different environment variables for legacy reasons) but still
+// fall back on the default chain of providers. It allows the default chain
+// to be updated automatically.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+ return []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ RemoteCredProvider(*cfg, handlers),
+ }
+}
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
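+//
+// A minimal sketch, assuming the config and handlers come from this
+// package's Config and Handlers helpers:
+//
+// cfg := defaults.Config()
+// handlers := defaults.Handlers()
+// creds := credentials.NewCredentials(defaults.RemoteCredProvider(*cfg, handlers))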
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("http://169.254.170.2%s", uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 000000000..ca0ee1dcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions are named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// the map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..c215cd3f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
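+//
+// A minimal sketch (svc is assumed to be an *EC2Metadata client created
+// with New):
+//
+// id, err := svc.GetMetadata("instance-id")
+// if err != nil {
+//     // handle the lookup failure
+// }
+// fmt.Println(id)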
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance.
+// If there is no user-data setup for the EC2 instance, a "NotFoundError"
+// error code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: "/user-data",
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+
+ return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response is
+// unable to be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns whether the application has access to the EC2 Metadata
+// service. It can be used to determine if the application is running within
+// an EC2 instance and the metadata service is available.
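+//
+// A minimal sketch (svc is assumed to be an *EC2Metadata client):
+//
+// if svc.Available() {
+//     doc, _ := svc.GetInstanceIdentityDocument()
+//     fmt.Println(doc.Region)
+// }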
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 000000000..53457cac3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,149 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable
+// to true instructs the SDK to disable the EC2 Metadata client. The client
+// cannot be used while the environment variable is set to true (case insensitive).
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified http.DefaultClient is provided, or no client at all, the
+// EC2Metadata client's HTTP timeout will be shortened. To disable this
+// override, set Config.EC2MetadataDisableTimeoutOverride to true. The
+// override is enabled by default.
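+//
+// A minimal sketch (cfg, handlers, and the endpoint string here are
+// illustrative assumptions, not values resolved by the SDK):
+//
+// svc := ec2metadata.NewClient(cfg, handlers,
+//     "http://169.254.169.254/latest", "")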
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Disable the EC2 Metadata service if the environment variable is set.
+ // This short-circuits the service's functionality to always fail to send
+ // requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(request.NamedHandler{
+ Name: corehandlers.SendHandler.Name,
+ Fn: func(r *request.Request) {
+ r.Error = awserr.New(
+ request.CanceledErrorCode,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 000000000..c04ba06c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,155 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
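+//
+// A minimal sketch (r is assumed to be an io.Reader over an endpoints
+// model file); the option functions are combined via Set inside DecodeModel:
+//
+// resolver, err := endpoints.DecodeModel(r,
+//     func(o *endpoints.DecodeModelOptions) {
+//         o.SkipCustomizations = true
+//     },
+// )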
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ s, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services["s3"] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+ if e, a := s.Defaults.Hostname, expectHostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+
+ s.Defaults.Hostname = expectHostname + ".cn"
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 000000000..4bd182ab2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,3328 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. 
+ DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + SdbServiceID = "sdb" // Sdb. 
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + 
"eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, 
+ }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": 
service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, 
+ "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + 
SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + 
Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + 
"us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: 
endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + 
Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: 
endpoints{
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"tagging": service{
+
+			Endpoints: endpoints{
+				"us-gov-west-1": endpoint{},
+			},
+		},
+		"translate": service{
+			Defaults: endpoint{
+				Protocols: []string{"https"},
+			},
+			Endpoints: endpoints{
+				"us-gov-west-1": endpoint{},
+				"us-gov-west-1-fips": endpoint{
+					Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+				},
+			},
+		},
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 000000000..84316b92c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// gives you access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+//     resolver := endpoints.DefaultResolver()
+//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//     for _, p := range partitions {
+//         fmt.Println("Regions for", p.ID())
+//         for id := range p.Regions() {
+//             fmt.Println("*", id)
+//         }
+//
+//         fmt.Println("Services for", p.ID())
+//         for id := range p.Services() {
+//             fmt.Println("*", id)
+//         }
+//     }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//     myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//         if service == endpoints.S3ServiceID {
+//             return endpoints.ResolvedEndpoint{
+//                 URL:           "s3.custom.endpoint.com",
+//                 SigningRegion: "custom-signing-region",
+//             }, nil
+//         }
+//
+//         return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 000000000..e29c09512 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,449 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unkonwn and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. 
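+//
+// A minimal sketch of how this option composes with EndpointFor (the service
+// ID "myservice" is illustrative, not a modeled service):
+//
+//    re, err := endpoints.AwsPartition().EndpointFor(
+//        "myservice", "us-west-2",
+//        endpoints.ResolveUnknownServiceOption,
+//    )
+//    // With no modeled entry, re.URL is built from the partition's endpoint
+//    // pattern, e.g. "https://myservice.us-west-2.amazonaws.com".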
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP is used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//    rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//    rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+	id string
+	p  *partition
+}
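+
+// A short sketch of enumerating the default partitions and their regions
+// (assumes the generated DefaultPartitions accessor defined later in this
+// package):
+//
+//    for _, p := range endpoints.DefaultPartitions() {
+//        fmt.Println(p.ID(), "has", len(p.Regions()), "regions")
+//    }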
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned:
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+	rs := map[string]Region{}
+	for id, r := range p.p.Regions {
+		rs[id] = Region{
+			id:   id,
+			desc: r.Description,
+			p:    p.p,
+		}
+	}
+
+	return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id := range p.p.Services {
+		ss[id] = Service{
+			id: id,
+			p:  p.p,
+		}
+	}
+
+	return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+	id, desc string
+	p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+	ss := map[string]Service{}
+	for id, s := range r.p.Services {
+		if _, ok := s.Endpoints[r.id]; ok {
+			ss[id] = Service{
+				id: id,
+				p:  r.p,
+			}
+		}
+	}
+
+	return ss
+}
+
+// A Service provides information about a service, and the ability to resolve
+// an endpoint from the context of a service, given a region.
+type Service struct {
+	id string
+	p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+	for id := range s.p.Services[s.id].Endpoints {
+		if r, ok := s.p.Regions[id]; ok {
+			rs[id] = Region{
+				id:   id,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := map[string]Endpoint{}
+	for id := range s.p.Services[s.id].Endpoints {
+		es[id] = Endpoint{
+			id:        id,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL
+	URL string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
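+//
+// Callers can detect this case with a type assertion. A sketch (the service
+// ID is illustrative):
+//
+//    _, err := endpoints.AwsUsGovPartition().EndpointFor(
+//        "not-a-service", "us-gov-west-1", endpoints.StrictMatchingOption)
+//    if _, ok := err.(endpoints.UnknownServiceError); ok {
+//        // fall back, or report the error's Known service list
+//    }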
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 000000000..ff6f76db6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,307 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. 
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
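+	// For example, with service "dynamodb" and region "us-gov-west-1", the
+	// blank endpoint falls through to endpoint.resolve, which typically fills
+	// the hostname from the partition's default pattern, commonly
+	// "{service}.{region}.{dnsSuffix}", yielding
+	// "dynamodb.us-gov-west-1.amazonaws.com".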
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go 
new file mode 100644 index 000000000..05e92df22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,337 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + 
"StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . 
-}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..576636168 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 000000000..91a6f277a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..6ed15b2ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. 
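+//
+// Debug sub-levels compose with bitwise OR, and Matches reports whether a
+// level is enabled. A small sketch:
+//
+//    lvl := aws.LogLevel(aws.LogDebugWithSigning | aws.LogDebugWithHTTPBody)
+//    lvl.Matches(aws.LogDebug)            // true; sub-levels imply LogDebug
+//    lvl.Matches(aws.LogDebugWithSigning) // true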
+func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) 
+} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 000000000..271da432c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 000000000..daf9eca43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000..605a72d3c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,274 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + Complete HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.Complete.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. 
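+//
+// A sketch of a list that stops iterating once a handler sets Request.Error:
+//
+//    var l HandlerList
+//    l.AfterEachFn = HandlerListStopOnError
+//    l.PushBack(func(r *Request) { r.Error = errors.New("fail fast") })
+//    l.PushBack(func(r *Request) { /* skipped once Error is set */ })
+//    l.Run(req)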
+type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. 
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
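+//
+// For example, appending an application identifier to every request built by
+// a session (the session value and "myapp/1.2.3" string are illustrative):
+//
+//    sess.Handlers.Build.PushBack(
+//        request.MakeAddToUserAgentFreeFormHandler("myapp/1.2.3"))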
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..79f79602b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..b0c2ef4fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,60 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, sdkio.SeekStart) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..75f0fe077 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,657 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
+ ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + SanitizeHostForHeader(httpReq) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + AttemptTime: time.Now(), + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. 
+//
+//    var id2, versionID string
+//    svc.PutObjectWithContext(ctx, params,
+//        request.WithGetResponseHeader("x-amz-id-2", &id2),
+//        request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//    )
+func WithGetResponseHeader(key string, val *string) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*val = req.HTTPResponse.Header.Get(key)
+		})
+	}
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//    var headers http.Header
+//    svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//    svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have
+// a context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers not hoisted without this, making the presigned URL
+ // useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API url.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to Build will have
+// no effect.
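+//
+// An illustrative sketch (not part of the SDK) of driving Build by hand:
+//
+// if err := req.Build(); err != nil {
+// return err // a Validate or Build handler rejected the request
+// }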
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning an error if one is encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of whether the Request.Body was empty or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add an http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot simply be set to nil, which would be preferable,
+ // because the field is exported and could introduce nil pointer
+ // dereferences for users of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ }
+
+ var body io.ReadCloser
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if an aws.ReaderSeekerCloser was used with
+ // an io.Reader that was not also an io.Seeker, or did not implement
+ // the Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning an error if one is encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request, trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ for {
+ r.AttemptTime = time.Now()
+ if aws.BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ if !shouldRetryCancel(r) {
+ return r.Error
+ }
+
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ err := r.Error
+
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, err)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+func shouldRetryCancel(r *Request) bool {
+ awsErr, ok := r.Error.(awserr.Error)
+ timeoutErr := false
+ errStr := r.Error.Error()
+ if ok {
+ if awsErr.Code() == CanceledErrorCode {
+ return false
+ }
+ err := awsErr.OrigErr()
+ netErr, netOK := err.(net.Error)
+ timeoutErr = netOK && netErr.Temporary()
+ if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+ errStr = urlErr.Err.Error()
+ }
+ }
+
+ // There can be two types of canceled errors here: the first is a
+ // net.Error and the other is a generic error. If the request timed
+ // out, we want to continue the retry process. Otherwise, return the
+ // canceled error.
+ return timeoutErr ||
+ (errStr != "net/http: request canceled" &&
+ errStr != "net/http: request canceled while waiting for connection")
+
+}
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// getHost returns the host from the request.
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// isDefaultPort returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 000000000..e36e468b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// noBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
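+//
+// Illustrative only (not part of the SDK): swapping the Body reader directly
+// requires a manual rewind before sending, whereas the Set*Body helpers do
+// not:
+//
+// req.Body = strings.NewReader("payload") // direct assignment, no rewind
+// req.ResetBody() // required before req.Send()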
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 000000000..7c6a8000f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+)
+
+// NoBody is an http.NoBody reader instructing the Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 000000000..a7365cd1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 000000000..307fa0705
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Sets the http.Request's Cancel field to the context's Done channel.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 000000000..a633ed5ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,264 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides paginating of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you, such as the
+// "S3.ListObjectsPages" and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// for p.Next() {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples of how the SDK
+// will use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // is returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+ // EndPageOnSameToken, when enabled, will allow the paginator to stop on
+ // tokens that are the same as its previous tokens.
+ EndPageOnSameToken bool
+
+ started bool
+ prevTokens []interface{}
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Next returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. The
+// Paginator is only used to store the token metadata the SDK should use for
+// performing pagination.
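+//
+// An illustrative sketch of constructing a Pagination by hand (the service,
+// input, and output types are placeholders, not part of this package):
+//
+// p := Pagination{
+// NewRequest: func() (*Request, error) {
+// req, _ := svc.ListObjectsRequest(input) // hypothetical service call
+// return req, nil
+// },
+// }
+// for p.Next() {
+// page := p.Page().(*s3.ListObjectsOutput)
+// _ = page // process the page's data
+// }
+// err := p.Err()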
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
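+//
+// An illustrative sketch of calling EachPage (the service and output types
+// are placeholders, not part of this package):
+//
+// req, _ := svc.ListTablesRequest(nil) // hypothetical service call
+// err := req.EachPage(func(page interface{}, lastPage bool) bool {
+// out := page.(*dynamodb.ListTablesOutput)
+// _ = out // process the page's data
+// return true // keep iterating
+// })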
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 000000000..7d5270298
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,161 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config, returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retryable
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the error is nil.
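+//
+// Illustrative only (not part of the SDK): callers can branch on these
+// helpers when implementing custom retry logic around a request's error.
+//
+// if IsErrorRetryable(err) || IsErrorThrottle(err) {
+// // back off and retry the request
+// }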
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if the error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 000000000..09a44eb98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whichever completes
+// first will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level
+// error with its child error if the child has the ErrCodeResponseTimeout code.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurs, the
+// ErrCodeResponseTimeout error will be returned.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 000000000..bcfd947a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,261 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with a numeric value
+ // that is too low.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// and base context updated to reflect the merging.
+//
+// Use for nested validation errors.
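+//
+// An illustrative sketch (the field and context names are placeholders):
+//
+// errs := ErrInvalidParams{Context: "PutObjectInput"}
+// errs.Add(NewErrParamRequired("Bucket"))
+// errs.AddNested("Metadata", nestedErrs) // nestedErrs from a child Validate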
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
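+//
+// Illustrative only (field names are placeholders): generated Validate
+// methods typically use it as
+//
+// if s.Bucket == nil {
+// invalidParams.Add(NewErrParamRequired("Bucket"))
+// }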
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+ return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 000000000..4601f883c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption which sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts.
+// It ignores the number of attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// A WaiterState is a state the waiter uses, based on WaiterAcceptor
+// definitions, to identify whether the resource state the waiter is waiting
+// on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API responses to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest
+// to build API requests.
+// The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil, a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts expire.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts are exceeded.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times.
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, and an error is
+// returned if there was an error attempting to perform the match.
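+//
+// For illustration (not part of the SDK), an acceptor that matches a 200
+// response status could be declared as:
+//
+// WaiterAcceptor{State: SuccessWaiterState, Matcher: StatusWaiterMatch, Expected: 200}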
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 000000000..98d420fd6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using NewSession, which will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions, optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers. With credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE to be set, or "default" is used.
+
+ // Create Session
+ sess := session.Must(session.NewSession())
+
+ // Create a Session with a custom region
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
+
+ // Create an S3 client instance from a session
+ sess := session.Must(session.NewSession())
+
+ svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
+
+ // Specify profile to load for the session's config
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Profile: "profile_name",
+ }))
+
+ // Specify profile for config and region for requests
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Region: aws.String("us-east-1")},
+ Profile: "profile_name",
+ }))
+
+ // Force enable Shared Config support
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Payload: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. When loaded from a configuration file, both aws_access_key_id
+and aws_secret_access_key must be provided together in the same file to be
+considered valid. The values will be ignored if not a complete group.
+aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+ role_arn = arn:aws:iam:::role/
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial = 
+ role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+ region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To set up assume role outside of a session, see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values.
+If set, both Access Key ID and Secret Access Key must be provided. Session
+Token can optionally also be provided, but is not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If
+it is not provided in the environment the region must be provided before a
+service client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be provided
+when creating the session. Not the service client.
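+
+For example, a minimal sketch of supplying a custom CA bundle through the
+session option (the file path and error handling are illustrative only):
+
+	f, err := os.Open("/path/to/custom_ca_bundle.pem")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		CustomCABundle: f,
+	}))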
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 000000000..82e04d76c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,219 @@
+package session
+
+import (
+	"os"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret
+	// Access Key must be provided. Session Token can optionally also be
+	// provided, but is not required.
+	//
+	//	# Access Key ID
+	//	AWS_ACCESS_KEY_ID=AKID
+	//	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	//	# Secret Access Key
+	//	AWS_SECRET_ACCESS_KEY=SECRET
+	//	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	//	# Session Token
+	//	AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to.
+	// If it is not provided in the environment the region must be provided
+	// before a service client request is made.
+	//
+	//	AWS_REGION=us-east-1
+	//
+	//	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	//	# and AWS_REGION is not also set.
+	//	AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	//	AWS_PROFILE=my_profile
+	//
+	//	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	//	# and AWS_PROFILE is not also set.
+	//	AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	//	AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	//	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	//	AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session. Not the service client.
+	//
+	//	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	csmEnabled  string
+	CSMEnabled  bool
+	CSMPort     string
+	CSMClientID string
+}
+
+var (
+	csmEnabledEnvKey = []string{
+		"AWS_CSM_ENABLED",
+	}
+	csmPortEnvKey = []string{
+		"AWS_CSM_PORT",
+	}
+	csmClientIDEnvKey = []string{
+		"AWS_CSM_CLIENT_ID",
+	}
+	credAccessEnvKey = []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_ACCESS_KEY",
+	}
+	credSecretEnvKey = []string{
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+	}
+	credSessionEnvKey = []string{
+		"AWS_SESSION_TOKEN",
+	}
+
+	regionEnvKeys = []string{
+		"AWS_REGION",
+		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	profileEnvKeys = []string{
+		"AWS_PROFILE",
+		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	sharedCredsFileEnvKey = []string{
+		"AWS_SHARED_CREDENTIALS_FILE",
+	}
+	sharedConfigFileEnvKey = []string{
+		"AWS_CONFIG_FILE",
+	}
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+	return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+	cfg := envConfig{}
+
+	cfg.EnableSharedConfig = enableSharedConfig
+
+	setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+	// CSM environment variables
+	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+	cfg.CSMEnabled = len(cfg.csmEnabled) > 0
+
+	// Require logical grouping of credentials
+	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+		cfg.Creds = credentials.Value{}
+	} else {
+		cfg.Creds.ProviderName = EnvProviderName
+	}
+
+	regionKeys := regionEnvKeys
+	profileKeys := profileEnvKeys
+	if !cfg.EnableSharedConfig {
+		regionKeys = regionKeys[:1]
+		profileKeys = profileKeys[:1]
+	}
+
+	setFromEnvVal(&cfg.Region, regionKeys)
+	setFromEnvVal(&cfg.Profile, profileKeys)
+
+	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+	if len(cfg.SharedCredentialsFile) == 0 {
+		cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+	}
+	if len(cfg.SharedConfigFile) == 0 {
+		cfg.SharedConfigFile = defaults.SharedConfigFilename()
+	}
+
+	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+	return cfg
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			*dst = v
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 000000000..51f305563
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,628 @@
+package session
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/csm"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method can encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session.
+// Use NewSession to get the error when creating the session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		var cfg aws.Config
+		cfg.MergeIn(cfgs...)
+		s, err := NewSessionWithOptions(Options{
+			Config:            cfg,
+			SharedConfigState: SharedConfigEnable,
+		})
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+				"Use session.NewSession to handle errors occurring during session creation."
+
+			// Session creation failed, need to report the error and prevent
+			// any requests from succeeding.
+			s = &Session{Config: defaults.Config()}
+			s.Config.MergeIn(cfgs...)
+			s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+			s.Handlers.Validate.PushBack(func(r *request.Request) {
+				r.Error = err
+			})
+		}
+
+		return s
+	}
+
+	s := deprecatedNewSession(cfgs...)
+	if envCfg.CSMEnabled {
+		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+	}
+
+	return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with credentials retrieved via AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	opts := Options{}
+	opts.Config.MergeIn(cfgs...)
+
+	return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It will override the environment variables AWS_SHARED_CREDENTIALS_FILE
+	// and AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's HTTP
+	// client. If the client's Transport is not a http.Transport an error will be
+	// returned. If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
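+	//
+	// For illustration, a minimal sketch of setting this field (the PEM
+	// bytes are assumed to already exist):
+	//
+	//	opts := session.Options{CustomCABundle: bytes.NewReader(pemBytes)}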
+	//
+	// The Session option CustomCABundle is also available when creating
+	// sessions to enable this feature. The CustomCABundle session option field
+	// has priority over the AWS_CA_BUNDLE environment variable, and will be
+	// used if both are set.
+	CustomCABundle io.Reader
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with credentials retrieved via AssumeRole set in the config.
+//
+//	// Equivalent to session.New
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+//	// Specify profile to load for the session's config
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		Profile: "profile_name",
+//	}))
+//
+//	// Specify profile for config and region for requests
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		Config:  aws.Config{Region: aws.String("us-east-1")},
+//		Profile: "profile_name",
+//	}))
+//
+//	// Force enable Shared Config support
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		SharedConfigState: session.SharedConfigEnable,
+//	}))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+	var envCfg envConfig
+	if opts.SharedConfigState == SharedConfigEnable {
+		envCfg = loadSharedEnvConfig()
+	} else {
+		envCfg = loadEnvConfig()
+	}
+
+	if len(opts.Profile) > 0 {
+		envCfg.Profile = opts.Profile
+	}
+
+	switch opts.SharedConfigState {
+	case SharedConfigDisable:
+		envCfg.EnableSharedConfig = false
+	case SharedConfigEnable:
+		envCfg.EnableSharedConfig = true
+	}
+
+	// Only use AWS_CA_BUNDLE if the session option is not provided.
+	if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+		f, err := os.Open(envCfg.CustomCABundle)
+		if err != nil {
+			return nil, awserr.New("LoadCustomCABundleError",
+				"failed to open custom CA bundle PEM file", err)
+		}
+		defer f.Close()
+		opts.CustomCABundle = f
+	}
+
+	return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup, such as:
+//
+//	var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+	if err != nil {
+		panic(err)
+	}
+
+	return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	if cfg.EndpointResolver == nil {
+		// An endpoint resolver is required for a session to be able to provide
+		// endpoints for service client configurations.
+		cfg.EndpointResolver = endpoints.DefaultResolver()
+	}
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+	return s
+}
+
+func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
+	logger.Log("Enabling CSM")
+	if len(port) == 0 {
+		port = csm.DefaultPort
+	}
+
+	r, err := csm.Start(clientID, "127.0.0.1:"+port)
+	if err != nil {
+		return
+	}
+	r.InjectHandlers(handlers)
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were set.
+	userCfg := &aws.Config{}
+	userCfg.MergeIn(cfgs...)
+
+	// Ordered config files will be loaded in with later files overwriting
+	// previous config file values.
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+		return nil, err
+	}
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+	if envCfg.CSMEnabled {
+		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+	var t *http.Transport
+	switch v := s.Config.HTTPClient.Transport.(type) {
+	case *http.Transport:
+		t = v
+	default:
+		if s.Config.HTTPClient.Transport != nil {
+			return awserr.New("LoadCustomCABundleError",
+				"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+		}
+	}
+	if t == nil {
+		t = &http.Transport{}
+	}
+
+	p, err := loadCertPool(bundle)
+	if err != nil {
+		return err
+	}
+	if t.TLSClientConfig == nil {
+		t.TLSClientConfig = &tls.Config{}
+	}
+	t.TLSClientConfig.RootCAs = p
+
+	s.Config.HTTPClient.Transport = t
+
+	return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New("LoadCustomCABundleError",
+			"failed to read custom CA bundle PEM file", err)
+	}
+
+	p := x509.NewCertPool()
+	if !p.AppendCertsFromPEM(b) {
+		return nil, awserr.New("LoadCustomCABundleError",
+			"failed to load custom CA bundle PEM file", err)
+	}
+
+	return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+	// Merge in user provided configuration
+	cfg.MergeIn(userCfg)
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	// Configure credentials if not already set
+	if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+		if len(envCfg.Creds.AccessKeyID) > 0 {
+			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+				envCfg.Creds,
+			)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+			cfgCp := *cfg
+			cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+				sharedCfg.AssumeRoleSource.Creds,
+			)
+			if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+				// AssumeRole Token provider is required if doing Assume Role
+				// with MFA.
+				return AssumeRoleTokenProviderNotSetError{}
+			}
+			cfg.Credentials = stscreds.NewCredentials(
+				&Session{
+					Config:   &cfgCp,
+					Handlers: handlers.Copy(),
+				},
+				sharedCfg.AssumeRole.RoleARN,
+				func(opt *stscreds.AssumeRoleProvider) {
+					opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+					// Assume role with external ID
+					if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+						opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+					}
+
+					// Assume role with MFA
+					if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+						opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+						opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+					}
+				},
+			)
+		} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+				sharedCfg.Creds,
+			)
+		} else {
+			// Fallback to default credentials provider, include mock errors
+			// for the credential chain so user can identify why credentials
+			// failed to be retrieved.
+			cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+				VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+				Providers: []credentials.Provider{
+					&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+					&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+					defaults.RemoteCredProvider(*cfg, handlers),
+				},
+			})
+		}
+	}
+
+	return nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the AssumeRoleTokenProvider option is not set and the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
+	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+	if !aws.BoolValue(s.Config.DisableParamValidation) {
+		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+	}
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers.
+// If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+//	// Create a copy of the current Session, configured for the us-west-2 region.
+//	sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+	newSession := &Session{
+		Config:   s.Config.Copy(cfgs...),
+		Handlers: s.Handlers.Copy(),
+	}
+
+	initHandlers(newSession)
+
+	return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	// Backwards compatibility, the error will be eaten if the user calls
+	// ClientConfig directly. All SDK services will use clientConfigWithErr.
+	cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+	return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+	s = s.Copy(cfgs...)
+
+	var resolved endpoints.ResolvedEndpoint
+	var err error
+
+	region := aws.StringValue(s.Config.Region)
+
+	if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+		resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+		resolved.SigningRegion = region
+	} else {
+		resolved, err = s.Config.EndpointResolver.EndpointFor(
+			serviceName, region,
+			func(opt *endpoints.Options) {
+				opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+				opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+				// Support the condition where the service is modeled but its
+				// endpoint metadata is not available.
+				opt.ResolveUnknownService = true
+			},
+		)
+	}
+
+	return client.Config{
+		Config:             s.Config,
+		Handlers:           s.Handlers,
+		Endpoint:           resolved.URL,
+		SigningRegion:      resolved.SigningRegion,
+		SigningNameDerived: resolved.SigningNameDerived,
+		SigningName:        resolved.SigningName,
+	}, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+	s = s.Copy(cfgs...)
+
+	var resolved endpoints.ResolvedEndpoint
+
+	region := aws.StringValue(s.Config.Region)
+
+	if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+		resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+		resolved.SigningRegion = region
+	}
+
+	return client.Config{
+		Config:             s.Config,
+		Handlers:           s.Handlers,
+		Endpoint:           resolved.URL,
+		SigningRegion:      resolved.SigningRegion,
+		SigningNameDerived: resolved.SigningNameDerived,
+		SigningName:        resolved.SigningName,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 000000000..09c8e5bc7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,295 @@
+package session
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/go-ini/ini"
+)
+
+const (
+	// Static Credentials group
+	accessKeyIDKey  = `aws_access_key_id`     // group required
+	secretAccessKey = `aws_secret_access_key` // group required
+	sessionTokenKey = `aws_session_token`     // optional
+
+	// Assume Role Credentials group
+	roleArnKey         = `role_arn`          // group required
+	sourceProfileKey   = `source_profile`    // group required
+	externalIDKey      = `external_id`       // optional
+	mfaSerialKey       = `mfa_serial`        // optional
+	roleSessionNameKey = `role_session_name` // optional
+
+	// Additional Config fields
+	regionKey = `region`
+
+	// DefaultSharedConfigProfile is the default profile to be used when
+	// loading configuration from the config files if another profile name
+	// is not provided.
+	DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+	RoleARN         string
+	SourceProfile   string
+	ExternalID      string
+	MFASerial       string
+	RoleSessionName string
+}
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+	//
+	//	aws_access_key_id
+	//	aws_secret_access_key
+	//	aws_session_token
+	Creds credentials.Value
+
+	AssumeRole       assumeRoleConfig
+	AssumeRoleSource *sharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service endpoints
+	// and signing requests.
+	//
+	//	region
+	Region string
+}
+
+type sharedConfigFile struct {
+	Filename string
+	IniData  *ini.File
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
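+//
+// For illustration (a sketch; the file path variables are hypothetical),
+// values in the later file win:
+//
+//	cfg, err := loadSharedConfig("default", []string{configPath, credsPath})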
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	if err = cfg.setFromIniFiles(profile, files); err != nil {
+		return sharedConfig{}, err
+	}
+
+	if len(cfg.AssumeRole.SourceProfile) > 0 {
+		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+			return sharedConfig{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		b, err := ioutil.ReadFile(filename)
+		if err != nil {
+			// Skip files which can't be opened and read for whatever reason
+			continue
+		}
+
+		f, err := ini.Load(b)
+		if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: f,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+	var assumeRoleSrc sharedConfig
+
+	// Multiple level assume role chains are not supported
+	if cfg.AssumeRole.SourceProfile == origProfile {
+		assumeRoleSrc = *cfg
+		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+	} else {
+		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+	}
+
+	cfg.AssumeRoleSource = &assumeRoleSrc
+
+	return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+	// Trim files from the list that don't exist.
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles that are missing
+				continue
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
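+//
+// For example (an illustrative snippet, not a real profile), this group
+// contributes no credentials because the secret key is missing:
+//
+//	[profile incomplete]
+//	aws_access_key_id = AKID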
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+	section, err := file.IniData.GetSection(profile)
+	if err != nil {
+		// Fallback to the alternate profile name: profile
+		section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if err != nil {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+		}
+	}
+
+	// Shared Credentials
+	akid := section.Key(accessKeyIDKey).String()
+	secret := section.Key(secretAccessKey).String()
+	if len(akid) > 0 && len(secret) > 0 {
+		cfg.Creds = credentials.Value{
+			AccessKeyID:     akid,
+			SecretAccessKey: secret,
+			SessionToken:    section.Key(sessionTokenKey).String(),
+			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+		}
+	}
+
+	// Assume Role
+	roleArn := section.Key(roleArnKey).String()
+	srcProfile := section.Key(sourceProfileKey).String()
+	if len(roleArn) > 0 && len(srcProfile) > 0 {
+		cfg.AssumeRole = assumeRoleConfig{
+			RoleARN:         roleArn,
+			SourceProfile:   srcProfile,
+			ExternalID:      section.Key(externalIDKey).String(),
+			MFASerial:       section.Key(mfaSerialKey).String(),
+			RoleSessionName: section.Key(roleSessionNameKey).String(),
+		}
+	}
+
+	// Region
+	if v := section.Key(regionKey).String(); len(v) > 0 {
+		cfg.Region = v
+	}
+
+	return nil
+}
+
+// SharedConfigLoadError is an error for the shared config file failing to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+		e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..244c86da0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value, and supports nested rules
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks that the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 000000000..6aa2ed241
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field of
+// the signer to true.
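+//
+// A sketch of use (the credentials value is assumed to already exist):
+//
+//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload)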
+func WithUnsignedPayload(v4 *Signer) {
+	v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 000000000..bd082e9d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 000000000..5b52ab221
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,796 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"///"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fallback to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing; this will help
+// prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URLs in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
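+//
+// For illustration, a hypothetical sketch of pre-escaping via URL.Opaque
+// before signing (the URL and path are placeholders):
+//
+//	req, _ := http.NewRequest("GET", "https://example.com/some/path", nil)
+//	req.URL.Opaque = "//example.com/some/path"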
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+
+	// emptyStringSHA256 is a SHA256 of an empty string
+	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+	blacklist{
+		mapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+		},
+	},
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+	whitelist{
+		mapRule{
+			"Cache-Control":                       struct{}{},
+			"Content-Disposition":                 struct{}{},
+			"Content-Encoding":                    struct{}{},
+			"Content-Language":                    struct{}{},
+			"Content-Md5":                         struct{}{},
+			"Content-Type":                        struct{}{},
+			"Expires":                             struct{}{},
+			"If-Match":                            struct{}{},
+			"If-Modified-Since":                   struct{}{},
+			"If-None-Match":                       struct{}{},
+			"If-Unmodified-Since":                 struct{}{},
+			"Range":                               struct{}{},
+			"X-Amz-Acl":                           struct{}{},
+			"X-Amz-Copy-Source":                   struct{}{},
+			"X-Amz-Copy-Source-If-Match":          struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":     struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since":                       struct{}{},
+			"X-Amz-Copy-Source-Range":                                     struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Grant-Full-control":                                    struct{}{},
+			"X-Amz-Grant-Read":                                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                                        struct{}{},
+			"X-Amz-Grant-Write":                                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                                       struct{}{},
+			"X-Amz-Metadata-Directive":                                    struct{}{},
+			"X-Amz-Mfa":                                                   struct{}{},
+			"X-Amz-Request-Payer":                                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+			"X-Amz-Storage-Class":                                         struct{}{},
+			"X-Amz-Website-Redirect-Location":                             struct{}{},
+			"X-Amz-Content-Sha256":                                        struct{}{},
+		},
+	},
+	patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist for building query parameters from
+// headers that are allowed to be hoisted.
+var allowedQueryHoisting = inclusiveRules{
+	blacklist{requiredSignedHeaders},
+	patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+
+	// Sets the log level the signer should use when reporting information to
+	// the logger. If the logger is nil nothing will be logged. See
+	// aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. Need to ensure that the underlying data of the Body
+	// values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues         credentials.Value
+	isPresign          bool
+	formattedTime      string
+	formattedShortTime string
+	unsignedPayload    bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil.
+// It's important to note that this functionality will not change the
+// request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// values instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is done because the general use case for S3 presigned URLs is sharing
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
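+//
+// A hypothetical sketch of presigning a request for 15 minutes (the request,
+// body, and credentials values are assumed to already exist):
+//
+//	signer := v4.NewSigner(creds)
+//	header, err := signer.Presign(req, body, "s3", "us-west-2", 15*time.Minute, time.Now())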
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.Get()
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be re-signed or it will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
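+//
+// For illustration only, a caller could tweak signing behavior by swapping
+// this handler on a client (the svc client value is a placeholder):
+//
+//	svc.Handlers.Sign.SwapNamed(v4.BuildNamedHandler("v4.SignRequestHandler",
+//		func(s *v4.Signer) { s.DisableURIPathEscaping = true }))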
+func SignSDKRequest(req *request.Request) {
+	signSDKRequestWithCurrTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			signSDKRequestWithCurrTime(req, time.Now, opts...)
+		},
+	}
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// If the request does not need to be signed, ignore the signing of the
+	// request. This is the case when the AnonymousCredentials object is used.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body, since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on top of by the signer.
+		v4.DisableRequestBodyOverwrite = true
+	})
+
+	for _, opt := range opts {
+		opt(v4)
+	}
+
+	signingTime := req.Time
+	if !req.LastSignedAt.IsZero() {
+		signingTime = req.LastSignedAt
+	}
+
+	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+		name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
+	)
+	if err != nil {
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	req.SignedHeaderVals = signedHeaders
+	req.LastSignedAt = curTimeFn()
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+	signedURLMsg := ""
+	if ctx.isPresign {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+	}
+	msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+	v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+	ctx.buildTime()             // no depends
+	ctx.buildCredentialString() // no depends
+
+	if err := ctx.buildBodyDigest(); err != nil {
+		return err
+	}
+
+	unsignedHeaders := ctx.Request.Header
+	if ctx.isPresign {
+		if !disableHeaderHoisting {
+			urlValues := url.Values{}
+			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+			for k := range urlValues {
+				ctx.Query[k] = urlValues[k]
+			}
+		}
+	}
+
+	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+	ctx.buildCanonicalString() // depends on canon headers / signed headers
+	ctx.buildStringToSign()    // depends on canon string
+	ctx.buildSignature()       // depends on string to sign
+
+	if ctx.isPresign {
+		ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+	} else {
+		parts := []string{
+			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+			"SignedHeaders=" + ctx.signedHeaders,
+			"Signature=" + ctx.signature,
+		}
+		ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+	}
+
+	return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+	ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+	ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+	if ctx.isPresign {
+		duration := int64(ctx.ExpireTime / time.Second)
+		ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+		ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+	} else {
+		ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+	}
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+	ctx.credentialString = strings.Join([]string{
+		ctx.formattedShortTime,
+		ctx.Region,
+		ctx.ServiceName,
+		"aws4_request",
+	}, "/")
+
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+	}
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
+		}
+	}
+
+	return query, unsignedHeaders
+}
+
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+	var headers []string
+	headers = append(headers, "host")
+	for k, v := range header {
+		canonicalKey := http.CanonicalHeaderKey(k)
+		if !r.IsValid(canonicalKey) {
+			continue // ignored header
+		}
+		if ctx.SignedHeaderVals == nil {
+			ctx.SignedHeaderVals = make(http.Header)
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+			// include additional values
+			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		ctx.SignedHeaderVals[lowerCaseKey] = v
+	}
+	sort.Strings(headers)
+
+	ctx.signedHeaders = strings.Join(headers, ";")
+
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+	}
+
+	headerValues := make([]string, len(headers))
+	for i, k := range headers {
+		if k == "host" {
+			if ctx.Request.Host != "" {
+				headerValues[i] = "host:" + ctx.Request.Host
+			} else {
+				headerValues[i] = "host:" + ctx.Request.URL.Host
+			}
+		} else {
+			headerValues[i] = k + ":" +
+				strings.Join(ctx.SignedHeaderVals[k], ",")
+		}
+	}
+	stripExcessSpaces(headerValues)
+	ctx.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+	uri := getURIPath(ctx.Request.URL)
+
+	if !ctx.DisableURIPathEscaping {
+		uri = rest.EscapePath(uri, false)
+	}
+
+	ctx.canonicalString = strings.Join([]string{
+		ctx.Request.Method,
+		uri,
+		ctx.Request.URL.RawQuery,
+		ctx.canonicalHeaders + "\n",
+		ctx.signedHeaders,
+		ctx.bodyDigest,
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+	ctx.stringToSign = strings.Join([]string{
+		authHeaderPrefix,
+		ctx.formattedTime,
+		ctx.credentialString,
+		hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+	secret := ctx.credValues.SecretAccessKey
+	date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+	region := makeHmac(date, []byte(ctx.Region))
+	service := makeHmac(region, []byte(ctx.ServiceName))
+	credentials := makeHmac(service, []byte("aws4_request"))
+	signature := makeHmac(credentials, []byte(ctx.stringToSign))
+	ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+	if hash == "" {
+		includeSHA256Header := ctx.unsignedPayload ||
+			ctx.ServiceName == "s3" ||
+			ctx.ServiceName == "glacier"
+
+		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+		if ctx.unsignedPayload || s3Presign {
+			hash = "UNSIGNED-PAYLOAD"
+			includeSHA256Header = !s3Presign
+		} else if ctx.Body == nil {
+			hash = emptyStringSHA256
+		} else {
+			if !aws.IsReaderSeekable(ctx.Body) {
+				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+			}
+			hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+		}
+
+		if includeSHA256Header {
+			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+		}
+	}
+	ctx.bodyDigest = hash
+
+	return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+	if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+		return true
+	}
+	if ctx.Request.Header.Get("Authorization") != "" {
+		return true
+	}
+
+	return false
+}
+
+// removePresign removes signing flags for both signed and presigned requests.
+func (ctx *signingCtx) removePresign() {
+	ctx.Query.Del("X-Amz-Algorithm")
+	ctx.Query.Del("X-Amz-Signature")
+	ctx.Query.Del("X-Amz-Security-Token")
+	ctx.Query.Del("X-Amz-Date")
+	ctx.Query.Del("X-Amz-Expires")
+	ctx.Query.Del("X-Amz-Credential")
+	ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+	hash := sha256.New()
+	start, _ := reader.Seek(0, sdkio.SeekCurrent)
+	defer reader.Seek(start, sdkio.SeekStart)
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil)
+}
+
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
+
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
+
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
+
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
+				}
+				spaces++
+			} else {
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
+			}
+		}
+
+		vals[i] = string(buf[:m])
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..8b6f23425
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,201 @@
+package aws
+
+import (
+	"io"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. It
+// should only be used with an io.Reader that is also an io.Seeker; otherwise
+// request signature errors may occur, or request bodies may not be sent for
+// GET, HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader can be seeked. An
+// io.Reader might not actually be seekable even when wrapped in the
+// ReaderSeekerCloser type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the wrapped reader is not an io.Reader, zero bytes read and a nil error
+// will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, sdkio.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. It can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position
+// provided. The number of bytes written will be returned, or an error. It can
+// overwrite previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 000000000..6192b2455
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+	return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 000000000..0210d2720
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+	"net/url"
+	"strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 000000000..29112cbbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.15.52"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 000000000..5aa9137e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 000000000..e5f005613
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 000000000..0c9802d87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 000000000..38ea61afe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+	"path"
+	"strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception that the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+
+	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+	str := path.Join(elems...)
+	if hasTrailing && str != "/" {
+		str += "/"
+	}
+
+	return str
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..ebcbc2b40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+	if runtime.GOOS == "windows" { // Windows
+		return os.Getenv("USERPROFILE")
+	}
+
+	// *nix
+	return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
new file mode 100644
index 000000000..f06f44ee1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -0,0 +1,21 @@
+package protocol
+
+// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain
+// host label name.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 000000000..53831dff9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used
+// otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// and are not already set can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. It will panic if the value is not settable.
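+//
+// A rough sketch of the intended use (the params value and its ClientToken
+// field are illustrative, not defined by this package):
+//
+//	v := reflect.ValueOf(params).Elem().FieldByName("ClientToken")
+//	protocol.SetIdempotencyToken(v) // fills the field with a random UUID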
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 000000000..776d11018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 000000000..e21614a12
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
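+//
+// As an illustrative sketch, a payload can be unmarshaled without a full SDK
+// request (the handlers value is a placeholder for a protocol's unmarshal
+// HandlerList, and output for the target shape):
+//
+//	u := protocol.HandlerPayloadUnmarshal{Unmarshalers: handlers}
+//	err := u.UnmarshalPayload(bytes.NewReader(body), &output)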
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "GET"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 000000000..60e5b09d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests, and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+	"net/url"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
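+//
+// For illustration, the serialized form-encoded body looks roughly like the
+// following (operation and parameter names here are placeholders):
+//
+//	Action=SomeOperation&Version=2012-01-01&ParamName=value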
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..75866d012 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..3495c7307 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 000000000..46d354e82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,74 @@
+package query
+
+import (
+	"encoding/xml"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName   xml.Name `xml:"ErrorResponse"`
+	Code      string   `xml:"Error>Code"`
+	Message   string   `xml:"Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+	XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("SerializationError", "failed to read from query HTTP response body", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// First check for specific error
+	resp := xmlErrorResponse{}
+	decodeErr := xml.Unmarshal(bodyBytes, &resp)
+	if decodeErr == nil {
+		reqID := resp.RequestID
+		if reqID == "" {
+			reqID = r.RequestID
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(resp.Code, resp.Message, nil),
+			r.HTTPResponse.StatusCode,
+			reqID,
+		)
+		return
+	}
+
+	// Check for unhandled error
+	servUnavailResp := xmlServiceUnavailableResponse{}
+	unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+	if unavailErr == nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Failed to retrieve any error message from the response body
+	r.Error = awserr.NewRequestFailure(
+		awserr.New("SerializationError",
+			"failed to decode query XML error response", decodeErr),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 000000000..b34f5258a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,295 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, false)
+		buildBody(r, v)
+	}
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+	query := r.HTTPRequest.URL.Query()
+
+	// Set up the raw path to match the base path pattern. This is needed
+	// so that when the path is mutated a custom escaped version can be
+	// stored in RawPath that will be used by the Go client.
+	r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+	for i := 0; i < v.NumField(); i++ {
+		m := v.Field(i)
+		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+			continue
+		}
+
+		if m.IsValid() {
+			field := v.Type().Field(i)
+			name := field.Tag.Get("locationName")
+			if name == "" {
+				name = field.Name
+			}
+			if kind := m.Kind(); kind == reflect.Ptr {
+				m = m.Elem()
+			} else if kind == reflect.Interface {
+				if !m.Elem().IsValid() {
+					continue
+				}
+			}
+			if !m.IsValid() {
+				continue
+			}
+			if field.Tag.Get("ignore") != "" {
+				continue
+			}
+
+			var err error
+			switch field.Tag.Get("location") {
+			case "headers": // header maps
+				err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+			case "header":
+				err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+			case "uri":
+				err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+			case "querystring":
+				err = buildQueryString(query, m, name, field.Tag)
+			default:
+				if buildGETQuery {
+					err = buildQueryString(query, m, name, field.Tag)
+				}
+			}
+			r.Error = err
+		}
+		if r.Error != nil {
+			return
+		}
+	}
+
+	r.HTTPRequest.URL.RawQuery = query.Encode()
+	if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+		cleanPath(r.HTTPRequest.URL)
+	}
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+	if field, ok := v.Type().FieldByName("_"); ok {
+		if payloadName := field.Tag.Get("payload"); payloadName != "" {
+			pfield, _ := v.Type().FieldByName(payloadName)
+			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+				payload := reflect.Indirect(v.FieldByName(payloadName))
+				if payload.IsValid() && payload.Interface() != nil {
+					switch reader := payload.Interface().(type) {
+					case io.ReadSeeker:
+						r.SetReaderBody(reader)
+					case []byte:
+						r.SetBufferBody(reader)
+					case string:
+						r.SetStringBody(reader)
+					default:
+						r.Error = awserr.New("SerializationError",
+							"failed to encode REST request",
+							fmt.Errorf("unknown payload type %s", payload.Type()))
+					}
+				}
+			}
+		}
+	}
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+	str, err := convertType(v, tag)
+	if err == errValueNotSet {
+		return nil
+	} else if err != nil {
+		return awserr.New("SerializationError", "failed to encode REST request", err)
+	}
+
+	header.Add(name, str)
+
+	return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+	prefix := tag.Get("locationName")
+	for _, key := range v.MapKeys() {
+		str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..33fd53b12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,225 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
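+//
+// For illustration, a shape like the following (the type and its tags are
+// examples of what the REST unmarshaler consumes, not definitions from this
+// package) has its payload and header fields filled from the response:
+//
+//	type ExampleOutput struct {
+//		_         struct{}      `payload:"Body"`
+//		RequestId *string       `location:"header" locationName:"x-amz-request-id"`
+//		Body      io.ReadCloser `type:"blob"`
+//	}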
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = 
http.CanonicalHeaderKey(k)
+		if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+			out[k[len(prefix):]] = &v[0]
+		}
+	}
+		r.Set(reflect.ValueOf(out))
+	}
+	return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+	isJSONValue := tag.Get("type") == "jsonvalue"
+	if isJSONValue {
+		if len(header) == 0 {
+			return nil
+		}
+	} else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+		return nil
+	}
+
+	switch v.Interface().(type) {
+	case *string:
+		v.Set(reflect.ValueOf(&header))
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *bool:
+		b, err := strconv.ParseBool(header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&b))
+	case *int64:
+		i, err := strconv.ParseInt(header, 10, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&i))
+	case *float64:
+		f, err := strconv.ParseFloat(header, 64)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&f))
+	case *time.Time:
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.RFC822TimeFormatName
+		}
+		t, err := protocol.ParseTime(format, header)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(&t))
+	case aws.JSONValue:
+		escaping := protocol.NoEscape
+		if tag.Get("location") == "header" {
+			escaping = protocol.Base64Escape
+		}
+		m, err := protocol.DecodeJSONValue(header, escaping)
+		if err != nil {
+			return err
+		}
+		v.Set(reflect.ValueOf(m))
+	default:
+		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 000000000..b7ed6c6f8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
+package protocol
+
+import (
+	"strconv"
+	"time"
+)
+
+// Names of time formats supported by the SDK
+const (
+	RFC822TimeFormatName  = "rfc822"
+	ISO8601TimeFormatName = "iso8601"
+	UnixTimeFormatName    = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+	// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
+	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+	// RFC3339 a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
+	ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+	switch name {
+	case RFC822TimeFormatName:
+		fallthrough
+	case ISO8601TimeFormatName:
+		fallthrough
+	case UnixTimeFormatName:
+		return true
+	default:
+		return false
+	}
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+	t = t.UTC()
+
+	switch name {
+	case RFC822TimeFormatName:
+		return t.Format(RFC822TimeFormat)
+	case ISO8601TimeFormatName:
+		return t.Format(ISO8601TimeFormat)
+	case UnixTimeFormatName:
+		return strconv.FormatInt(t.Unix(), 10)
+	default:
+		panic("unknown timestamp format name, " + name)
+	}
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the parsed time on success, and an error otherwise.
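+//
+// A minimal usage sketch (illustrative; the literal value is an example only):
+//
+//	t, err := ParseTime(ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
+//	if err != nil {
+//		// handle the unparsable timestamp
+//	}
+//	_ = t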
+func ParseTime(formatName, value string) (time.Time, error) {
+	switch formatName {
+	case RFC822TimeFormatName:
+		return time.Parse(RFC822TimeFormat, value)
+	case ISO8601TimeFormatName:
+		return time.Parse(ISO8601TimeFormat, value)
+	case UnixTimeFormatName:
+		v, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return time.Time{}, err
+		}
+		return time.Unix(int64(v), 0), nil
+	default:
+		panic("unknown timestamp format name, " + formatName)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..da1a68111
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 000000000..1bfe45f65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+	return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+	root := NewXMLElement(xml.Name{})
+	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+		return err
+	}
+	for _, c := range root.Children {
+		for _, v := range c {
+			return StructToXML(e, v, sorted)
+		}
+	}
+	return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+	encoder    *xml.Encoder
+	namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// It also takes a "type" tag value to set what type the value should be
+// converted to an XMLNode as. If the type tag is not provided, reflection is
+// used to determine the value's type.
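+//
+// For illustration only (a hypothetical member, not generated SDK code), a
+// field such as
+//
+//	Names []*string `locationName:"Names" type:"list"`
+//
+// is dispatched to buildList by its "type" tag, while an untagged struct
+// member is dispatched to buildStruct based on its reflected kind.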
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields
+// and any nested types are converted to XMLNodes as well.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// there is an xmlNamespace associated with this struct
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	var payloadFields, nonPayloadFields int
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+		if field.Tag.Get("ignore") != "" {
+			continue
+		}
+
+		mTag := field.Tag
+		if mTag.Get("location") != "" { // skip non-body members
+			nonPayloadFields++
+			continue
+		}
+		payloadFields++
+
+		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(token)
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+	}
+
+	// Only case where the child shape is not added is if the shape only contains
+	// non-payload fields, e.g. headers/query.
+	if !(payloadFields == 0 && nonPayloadFields > 0) {
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes as well.
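+//
+// Sketch of the two shapes produced (tags and values are hypothetical): with
+// `flattened:"true"` and locationName "Item", a two-element list marshals as
+//
+//	<Item>a</Item><Item>b</Item>
+//
+// while the unflattened form nests entries under the default "member" name:
+//
+//	<Item><member>a</member><member>b</member></Item>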
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check for unflattened list member
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes as well.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" value.
+//
+// Error will be returned if the value type is unsupported.
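+//
+// Sketch (hypothetical tag): a member tagged `locationName:"Version" xmlAttribute:"true"`
+// holding "1" is emitted as the attribute Version="1" on the current node
+// rather than as a child element.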
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..ff1ef6830 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,272 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
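+//
+// Sketch (hypothetical field): given XML <Name>abc</Name> under this node, a
+// field declared as
+//
+//	Name *string `locationName:"Name" type:"string"`
+//
+// is matched by its locationName and handed to parse, which stores "abc".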
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
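+//
+// Sketch (example values only): a node whose Text is "123" parsed into a
+// *int64 destination yields int64(123); Text "true" into a *bool yields true.
+// Any other destination type falls through to the unsupported-value error.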
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	switch r.Interface().(type) {
+	case *string:
+		r.Set(reflect.ValueOf(&node.Text))
+		return nil
+	case []byte:
+		b, err := base64.StdEncoding.DecodeString(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(b))
+	case *bool:
+		v, err := strconv.ParseBool(node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *int64:
+		v, err := strconv.ParseInt(node.Text, 10, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *float64:
+		v, err := strconv.ParseFloat(node.Text, 64)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&v))
+	case *time.Time:
+		format := tag.Get("timestampFormat")
+		if len(format) == 0 {
+			format = protocol.ISO8601TimeFormatName
+		}
+
+		t, err := protocol.ParseTime(format, node.Text)
+		if err != nil {
+			return err
+		}
+		r.Set(reflect.ValueOf(&t))
+	default:
+		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 000000000..515ce1521
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,148 @@
+package xmlutil
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+	Name     xml.Name              `json:",omitempty"`
+	Children map[string][]*XMLNode `json:",omitempty"`
+	Text     string                `json:",omitempty"`
+	Attr     []xml.Attr            `json:",omitempty"`
+
+	namespaces map[string]string
+	parent     *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+	return &XMLNode{
+		Name:     name,
+		Children: map[string][]*XMLNode{},
+		Attr:     []xml.Attr{},
+	}
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
+	if _, ok := n.Children[child.Name.Local]; !ok {
+		n.Children[child.Name.Local] = []*XMLNode{}
+	}
+	n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
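+//
+// Sketch (hypothetical document): decoding <a><b>1</b><b>2</b></a> produces a
+// root node whose Children["a"][0].Children["b"] holds two nodes with Text
+// "1" and "2".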
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..ee908f916 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2398 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. 
To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. +// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. 
In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. 
+// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. 
+// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours.
To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 
+// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. 
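+//
+// An illustrative sketch (the client and encodedMessage variables are
+// placeholders):
+//
+//	out, err := client.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//		EncodedMessage: aws.String(encodedMessage),
+//	})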
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+//    * Whether the request was denied due to an explicit deny or due to the
+//    absence of an explicit allow. For more information, see Determining Whether
+//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    in the IAM User Guide.
+//
+//    * The principal who made the request.
+//
+//    * The requested action.
+//
+//    * The requested resource.
+//
+//    * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+//   The error returned if the message passed to DecodeAuthorizationMessage was
+//   invalid. This can happen if the token contains invalid characters, such as
+//   line breaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates an "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
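+//
+// For instance (a sketch, not generated documentation), a caller could set a
+// custom header on the underlying HTTP request before sending it; the header
+// name below is purely hypothetical:
+//
+//    req, resp := client.GetCallerIdentityRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }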
+//
+//
+//    // Example sending a request using the GetCallerIdentityRequest method.
+//    req, resp := client.GetCallerIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates an "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetFederationTokenRequest method.
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot use these credentials to call any IAM APIs.
+//
+//    * You cannot call any STS APIs except GetCallerIdentity.
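+//
+// As a hedged sketch (not generated documentation; svc is assumed to be an
+// *sts.STS client and policyJSON a JSON policy that scopes down the federated
+// user's permissions):
+//
+//    out, err := svc.GetFederationToken(&GetFederationTokenInput{
+//        Name:            aws.String("Bob"),
+//        Policy:          aws.String(policyJSON),
+//        DurationSeconds: aws.Int64(3600),
+//    })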
+// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates an "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error.
+// For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
+//
+//    * You cannot call any STS APIs except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
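+//
+// A hedged sketch of an MFA-protected call (illustration only; svc is assumed
+// to be an *sts.STS client, and the serial number and code come from the
+// caller's MFA device):
+//
+//    out, err := svc.GetSessionToken(&GetSessionTokenInput{
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"),
+//        TokenCode:    aws.String("123456"),
+//    })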
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) up to the maximum session duration setting for the role.
+	// This setting can have a value from 1 hour to 12 hours. If you specify a value
+	// higher than this setting, the operation fails. For example, if you specify
+	// a session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails. To learn how to view the maximum
+	// value for your role, see View the Maximum Session Duration Setting for a
+	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces.
You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. 
+type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. 
For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	//
+	// PrincipalArn is a required field
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	//
+	// SAMLAssertion is a required field
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. 
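+	//
+	// (Illustrative sketch only: in Go, the hash described by the pseudocode
+	// below could be computed as
+	//
+	//    sum := sha1.Sum([]byte(issuer + accountID + providerName)) // placeholder names
+	//    nameQualifier := base64.StdEncoding.EncodeToString(sum[:])
+	//
+	// using crypto/sha1 and encoding/base64.)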
+ // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. 
To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. 
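+	//
+	// (Sketch, not generated documentation: an input could be assembled with
+	// the fluent setters defined below, e.g.
+	//
+	//    input := (&AssumeRoleWithWebIdentityInput{}).
+	//        SetRoleArn("arn:aws:iam::123456789012:role/demo"). // hypothetical ARN
+	//        SetRoleSessionName("app-user-123").
+	//        SetWebIdentityToken(token)
+	//
+	// where token is the value obtained from the identity provider.)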
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. 
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
+func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. 
+ // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. 
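+// Setters on these generated types return the receiver, so calls can be
+// chained when building a request. A minimal illustrative sketch (not part
+// of the generated documentation; the values are placeholders):
+//
+//    input := (&GetFederationTokenInput{}).
+//        SetName("Bob").
+//        SetDurationSeconds(3600)
+//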
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. 
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, and the user does not provide a code when requesting a set of
+ // temporary security credentials, the user will receive an "access denied"
+ // response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
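+// In typical use the temporary credentials are read from the output rather
+// than set. A minimal illustrative sketch, assuming an *STS client named
+// svc (error handling elided):
+//
+//    out, err := svc.GetSessionToken(&GetSessionTokenInput{})
+//    if err == nil {
+//        fmt.Println(*out.Credentials.AccessKeyId)
+//    }
+//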
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..4010cc7fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..ef681ab0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,72 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
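+//
+// For example, a client can be pointed at a specific regional endpoint when
+// it is constructed (an illustrative sketch; the session setup is assumed):
+//
+//    sess := session.Must(session.NewSession())
+//    svc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))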
+// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..e24884ef3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". 
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..185c914d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { + svc := &STS{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2011-06-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a STS operation and runs any +// custom request initialization. +func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. 
See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.99, 0.9, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
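+ //
+ // A typical caller supplies the quantiles it will later Query together
+ // with the absolute error tolerated for each, e.g. (an illustrative
+ // sketch, not from the original documentation):
+ //
+ //    NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})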
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
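+ //
+ // The loop below keeps s.l sorted by Value: each incoming sample is
+ // inserted before the first stored sample with a larger value, while r
+ // accumulates the width (rank) seen so far so the allowed error can be
+ // computed via the invariant function ƒ.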
+ var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 000000000..d361bbcdf --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 000000000..80afe7431 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+
+package ini
+
+import (
+ "fmt"
+)
+
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
new file mode 100644
index 000000000..762e52551
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -0,0 +1,415 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // BlockMode makes concurrent access safe by guarding operations with the
+ // lock below; it can be disabled when that safety doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes a File object with the given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+ // Ignore error here, we're sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns the section with the given name.
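+// A minimal illustrative call, assuming cfg is a *File loaded elsewhere:
+//
+//    sec, err := cfg.GetSection("database")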
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+ if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes the named section exists and creates it if not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns the list of sections.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of the given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns the list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
+
+ if PrettyFormat || PrettyEqual {
+ equalSign = " = "
+ }
+
+ // Use a buffer to make sure the target is safe until encoding finishes.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+ // longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.ContainsAny(kname, "\"=:") {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KEY_LIST:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ // Support multiline comments
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indentation.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return 0, err
+	}
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file, it is safer to save
+	// to a temporary location and rename after done.
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 000000000..2920e0118
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,215 @@
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"regexp"
+	"runtime"
+)
+
+const (
+	// Name for default section. You can use this constant or the string literal.
+	// In most of cases, an empty string is all you need to access the section.
+	DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+	_DEPTH_VALUES = 99
+	_VERSION      = "1.38.3"
+)
+
+// Version returns current package version literal.
+func Version() string {
+	return _VERSION
+}
+
+var (
+	// Delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows
+	// at package init time.
+	LineBreak = "\n"
+
+	// Place custom spaces when PrettyFormat and PrettyEqual are both disabled
+	DefaultFormatLeft  = ""
+	DefaultFormatRight = ""
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// Indicate whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+
+	// Place spaces around "=" sign even when PrettyFormat is false
+	PrettyEqual = false
+
+	// Explicitly write DEFAULT section header
+	DefaultHeader = false
+
+	// Indicate whether to put a line between sections
+	PrettySection = true
+)
+
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
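+// The three implementations below cover the supported source kinds:
+// a file path (string), in-memory data ([]byte), and an io.ReadCloser stream.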
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+	reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+	return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	case io.ReadCloser:
+		return &sourceReadCloser{s}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+	}
+}
+
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return an error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat them as keys with a missing value.
+	// This type of key is mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with same name under same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes, or it may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content that do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+}
+
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data ([]byte).
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function
+// except it allows shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 000000000..7c8566a1b
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,751 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	Comment         string
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	isShadow bool
+	shadows  []*Key
+
+	nestedValues []string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+	if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add nested value to auto-increment or boolean key")
+	}
+
+	k.nestedValues = append(k.nestedValues, val)
+	return nil
+}
+
+func (k *Key) AddNestedValue(val string) error {
+	if !k.s.f.options.AllowNestedValues {
+		return errors.New("nested value is not allowed")
+	}
+	return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// The returned value may be nil if no nested values are stored in the key.
+func (k *Key) NestedValues() []string {
+	return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms it into its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail fast if no indicator char is found for a recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil || k == nk {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
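+// For example, parseBool("YES") returns (true, nil), while parseBool("2")
+// returns an error.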
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		k.value = defaultVal
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+	val, err := k.Bool()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatBool(defaultVal[0])
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+	val, err := k.Float64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+	val, err := k.Int()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+	val, err := k.Int64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+	val, err := k.Uint()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
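+// For example, on a key holding "abc", MustUint64(7) returns 7 and also
+// resets the key's value to "7".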
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx += 1 + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. 
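+// For example, a value of "1.5,abc,3" with delim "," yields []float64{1.5, 0, 3}.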
+func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. 
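+// In contrast to Times, invalid inputs are dropped instead of being kept as zero values.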
+func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. 
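+// (As with the other parse helpers, addInvalid keeps the zero value for
+// unparsable inputs, while returnOnInvalid aborts on the first error.)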
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 000000000..3daf54c38 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,494 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
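+	// A name may be wrapped in `...`, "..." or """...""" when it contains
+	// special characters, e.g. `my=key` = value.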
+	var keyQuote string
+	if line[0] == '"' {
+		if len(line) > 6 && string(line[0:3]) == `"""` {
+			keyQuote = `"""`
+		} else {
+			keyQuote = `"`
+		}
+	} else if line[0] == '`' {
+		keyQuote = "`"
+	}
+
+	// Get out key name
+	endIdx := -1
+	if len(keyQuote) > 0 {
+		startIdx := len(keyQuote)
+		// FIXME: fail case -> """"""name"""=value
+		pos := strings.Index(line[startIdx:], keyQuote)
+		if pos == -1 {
+			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+		}
+		pos += startIdx
+
+		// Find key-value delimiter
+		i := strings.IndexAny(line[pos+startIdx:], "=:")
+		if i < 0 {
+			return "", -1, ErrDelimiterNotFound{line}
+		}
+		endIdx = pos + i
+		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+	}
+
+	endIdx = strings.IndexAny(line, "=:")
+	if endIdx < 0 {
+		return "", -1, ErrDelimiterNotFound{line}
+	}
+	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := string(data)
+
+		pos := strings.LastIndex(next, valQuote)
+		if pos > -1 {
+			val += next[:pos]
+
+			comment, has := cleanComment([]byte(next[pos:]))
+			if has {
+				p.comment.Write(bytes.TrimSpace(comment))
+			}
+			break
+		}
+		val += next
+		if p.isEOF {
+			return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+		}
+	}
+	return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := strings.TrimSpace(string(data))
+
+		if len(next) == 0 {
+			break
+		}
+		val += next
+		if val[len(val)-1] != '\\' {
+			break
+		}
+		val = val[:len(val)-1]
+	}
+	return val, nil
+}
+
+// hasSurroundedQuote checks if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other part also contains the same kind of quote.
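+// For example, hasSurroundedQuote(`"abc"`, '"') is true,
+// but hasSurroundedQuote(`"a"b"`, '"') is false.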
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, + parserBufferSize int, + ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if unescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if unescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !ignoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + var i int + if spaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && unescapeValueCommentSymbols { + if strings.Contains(line, `\;`) { + line = strings.Replace(line, `\;`, ";", -1) + } + if strings.Contains(line, `\#`) { + line = strings.Replace(line, `\#`, "#", -1) + } + } else if allowPythonMultilines && lastChar == '\n' { + parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + identSize := -1 + val := line + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil { + if peekErr == io.EOF { + return val, nil + } + return "", peekErr + } + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + if len(peekMatches) != 3 { + return val, nil + } + + currentIdentSize := len(peekMatches[1]) + // NOTE: Return if not a python-ini multi-line value. + if currentIdentSize < 0 { + return val, nil + } + identSize = currentIdentSize + + // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.readUntil('\n') + if err != nil { + return "", err + } + + val += fmt.Sprintf("\n%s", peekMatches[2]) + } + + // NOTE: If it was a Python multi-line value, + // return the appended value. + if identSize > 0 { + return val, nil + } + } + + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
+	name := DEFAULT_SECTION
+	if f.options.Insensitive {
+		name = strings.ToLower(DEFAULT_SECTION)
+	}
+	section, _ := f.NewSection(name)
+
+	// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
+	var isLastValueEmpty bool
+	var lastRegularKey *Key
+
+	var line []byte
+	var inUnparseableSection bool
+
+	// NOTE: Iterate and increase `currentPeekSize` until
+	// the size of the parser buffer is found.
+	// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+	parserBufferSize := 0
+	// NOTE: Peek 1kb at a time.
+	currentPeekSize := 1024
+
+	if f.options.AllowPythonMultilineValues {
+		for {
+			peekBytes, _ := p.buf.Peek(currentPeekSize)
+			peekBytesLength := len(peekBytes)
+
+			if parserBufferSize >= peekBytesLength {
+				break
+			}
+
+			currentPeekSize *= 2
+			parserBufferSize = peekBytesLength
+		}
+	}
+
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		if f.options.AllowNestedValues &&
+			isLastValueEmpty && len(line) > 0 {
+			if line[0] == ' ' || line[0] == '\t' {
+				lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+				continue
+			}
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we do not care about the ending line break,
+			// it is needed for adding the second line,
+			// so just clean it once at the end when set to value.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			closeIdx := bytes.LastIndexByte(line, ']')
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			name := string(line[1:closeIdx])
+			section, err = f.NewSection(name)
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+
+			inUnparseableSection = false
+			for i := range f.options.UnparseableSections {
+				if f.options.UnparseableSections[i] == name ||
+					(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+					inUnparseableSection = true
+					continue
+				}
+			}
+			continue
+		}
+
+		if inUnparseableSection {
+			section.isRawSection = true
+			section.rawBody += string(line)
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			// Treat as boolean key when desired, and whole line is key name.
+			if IsErrDelimiterNotFound(err) {
+				switch {
+				case f.options.AllowBooleanKeys:
+					kname, err := p.readValue(line,
+						parserBufferSize,
+						f.options.IgnoreContinuation,
+						f.options.IgnoreInlineComment,
+						f.options.UnescapeValueDoubleQuotes,
+						f.options.UnescapeValueCommentSymbols,
+						f.options.AllowPythonMultilineValues,
+						f.options.SpaceBeforeInlineComment)
+					if err != nil {
+						return err
+					}
+					key, err := section.NewBooleanKey(kname)
+					if err != nil {
+						return err
+					}
+					key.Comment = strings.TrimSpace(p.comment.String())
+					p.comment.Reset()
+					continue
+
+				case f.options.SkipUnrecognizableLines:
+					continue
+				}
+			}
+			return err
+		}
+
+		// Auto increment.
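+		// Keys written as "-" receive generated names "#1", "#2", ... in order of appearance.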
+ isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], + parserBufferSize, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes, + f.options.UnescapeValueCommentSymbols, + f.options.AllowPythonMultilineValues, + f.options.SpaceBeforeInlineComment) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 000000000..340a1efad --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,258 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. 
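+// If the key is not found, parent sections (section names split on ".") are
+// searched before an error is returned.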
+func (s *Section) GetKey(name string) (*Key, error) {
+	// FIXME: change to section level lock?
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+	}
+	if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+	key := s.keys[name]
+	if s.f.BlockMode {
+		s.f.lock.RUnlock()
+	}
+
+	if key == nil {
+		// Check if it is a child-section.
+		sname := s.name
+		for {
+			if i := strings.LastIndex(sname, "."); i > -1 {
+				sname = sname[:i]
+				sec, err := s.f.GetSection(sname)
+				if err != nil {
+					continue
+				}
+				return sec.GetKey(name)
+			} else {
+				break
+			}
+		}
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+	}
+	return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+	key, _ := s.GetKey(name)
+	return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+// TODO: delete me in v2
+func (s *Section) Haskey(name string) bool {
+	return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	for _, k := range s.keys {
+		if value == k.value {
+			return true
+		}
+	}
+	return false
+}
+
+// Key assumes the named Key exists in the section and creates a new empty key when it does not.
+func (s *Section) Key(name string) *Key {
+	key, err := s.GetKey(name)
+	if err != nil {
+		// It's OK here because the only possible error is empty key name,
+		// but if it's empty, this piece of code won't be executed.
+		key, _ = s.NewKey(name, "")
+		return key
+	}
+	return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+	keys := make([]*Key, len(s.keyList))
+	for i := range s.keyList {
+		keys[i] = s.Key(s.keyList[i])
+	}
+	return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+	var parentKeys []*Key
+	sname := s.name
+	for {
+		if i := strings.LastIndex(sname, "."); i > -1 {
+			sname = sname[:i]
+			sec, err := s.f.GetSection(sname)
+			if err != nil {
+				continue
+			}
+			parentKeys = append(parentKeys, sec.Keys()...)
+		} else {
+			break
+		}
+
+	}
+	return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+	list := make([]string, len(s.keyList))
+	copy(list, s.keyList)
+	return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	hash := map[string]string{}
+	for key, value := range s.keysHash {
+		hash[key] = value
+	}
+	return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	for i, k := range s.keyList {
+		if k == name {
+			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+			delete(s.keys, name)
+			return
+		}
+	}
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+	prefix := s.name + "."
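+	// Note that prefix matching also picks up deeper descendants such as "parent.child1.grandchild".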
+ children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 000000000..9719dc698 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,512 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. 
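+// Supported element types are string, int, int64, uint, uint64, float64 and time.Time.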
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
+	numVals := len(strs)
+	if numVals == 0 {
+		return nil
+	}
+
+	var vals interface{}
+	var err error
+
+	sliceOf := field.Type().Elem().Kind()
+	switch sliceOf {
+	case reflect.String:
+		vals = strs
+	case reflect.Int:
+		vals, err = key.parseInts(strs, true, false)
+	case reflect.Int64:
+		vals, err = key.parseInt64s(strs, true, false)
+	case reflect.Uint:
+		vals, err = key.parseUints(strs, true, false)
+	case reflect.Uint64:
+		vals, err = key.parseUint64s(strs, true, false)
+	case reflect.Float64:
+		vals, err = key.parseFloat64s(strs, true, false)
+	case reflectTime:
+		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+	default:
+		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+	}
+	if err != nil && isStrict {
+		return err
+	}
+
+	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+	for i := 0; i < numVals; i++ {
+		switch sliceOf {
+		case reflect.String:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+		case reflect.Int:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+		case reflect.Int64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+		case reflect.Uint:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+		case reflect.Uint64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+		case reflect.Float64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+		case reflectTime:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+		}
+	}
+	field.Set(slice)
+	return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+	if isStrict {
+		return err
+	}
+	return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return an error for failed parsing,
+// because we want to use the default value that is already assigned to the struct.
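+// Integer-kind fields are first probed as time.Duration, which lets
+// time.Duration struct fields accept values such as "30s".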
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. 
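+//
+// A minimal usage sketch (the struct, tags and section name here are
+// illustrative, not part of this package):
+//
+//	type Server struct {
+//		Host string `ini:"host"`
+//		Port int    `ini:"port"`
+//	}
+//	var srv Server
+//	err := cfg.Section("server").MapTo(&srv)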
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+	return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+	return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool:
+		key.SetValue(fmt.Sprint(field.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		key.SetValue(fmt.Sprint(field.Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		key.SetValue(fmt.Sprint(field.Uint()))
+	case reflect.Float32, reflect.Float64:
+		key.SetValue(fmt.Sprint(field.Float()))
+	case reflectTime:
+		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+	case reflect.Slice:
+		return reflectSliceWithProperType(key, field, delim)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflectTime:
+		t, ok := v.Interface().(time.Time)
+		return ok && t.IsZero()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only error here is that the section does not exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+
+			// Add comment from comment tag
+			if len(sec.Comment) == 0 {
+				sec.Comment = tpField.Tag.Get("comment")
+			}
+
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as section.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+
+		// Add comment from comment tag
+		if len(key.Comment) == 0 {
+			key.Comment = tpField.Tag.Get("comment")
+		}
+
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
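reflectFrom is the write direction: struct fields become keys, anonymous pointer fields and nested structs become sections, omitempty suppresses values per isEmptyValue, and comment tags are copied onto newly created sections and keys. A hedged sketch using the exported wrappers that follow just below (os and time imports assumed, Config as before; output is approximate):

cfg := ini.Empty()
c := Config{Name: "demo", Timeout: 30 * time.Second, Ports: []int{80, 443}}
if err := ini.ReflectFrom(cfg, &c); err != nil {
	panic(err)
}
_, _ = cfg.WriteTo(os.Stdout)
// name    = demo
// debug   = false
// timeout = 30000000000   (reflectWithProperType has no Duration case, so the raw int64 is printed)
// ports   = 80,443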
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
index 1b1b1921e..0f646931a 100644
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
 Copyright 2010 The Go Authors.  All rights reserved.
-https://github.com/golang/protobuf
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
index e392575b3..3cd3249f7 100644
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -35,22 +35,39 @@
 package proto
 
 import (
+	"fmt"
 	"log"
 	"reflect"
 	"strings"
 )
 
 // Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
 	if in.IsNil() {
-		return pb
+		return src
 	}
 	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
 }
 
 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
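With the change above, Clone is a thin wrapper: allocate a fresh message of the same type and Merge the source into it, which lets the Merger and XXX_Merge fast paths of generated code kick in. A hedged sketch; pb.Item stands in for any protoc-generated message type:

src := &pb.Item{Name: proto.String("a"), Tags: []string{"x"}}
dst := proto.Clone(src).(*pb.Item) // deep copy: Merge into a freshly allocated message
proto.Merge(dst, src)              // a second merge appends repeated fields: dst.Tags is now ["x" "x"]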
func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f..d9aa3c42d 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
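DecodeVarint and the fixed-width decoders that remain above are the primitives the rest of the package builds on. A tiny self-contained check (the bytes are the classic varint example for 300, not taken from this patch):

b := proto.NewBuffer([]byte{0xac, 0x02}) // 300, emitted 7 bits at a time, least significant group first
x, err := b.DecodeVarint()
fmt.Println(x, err) // 300 <nil>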
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. 
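The NOTE threaded through these functions is the practical takeaway: proto.Unmarshal always resets the target first, while proto.UnmarshalMerge leaves existing data in place and appends to repeated fields. A sketch, again with a hypothetical generated pb.Item and previously encoded bytes in data:

m := &pb.Item{Tags: []string{"old"}}
_ = proto.Unmarshal(data, m)      // m.Reset() runs first, so "old" is gone
m = &pb.Item{Tags: []string{"old"}}
_ = proto.UnmarshalMerge(data, m) // "old" is kept; tags encoded in data are appended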
func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
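The required-field bookkeeping in the removed unmarshalType is worth spelling out: for tags 1 through 64 a uint64 bitmap records which required fields have been seen, so a field that is encoded twice is only counted once. A standalone sketch of just that trick:

var reqFields uint64
required := 2                        // outstanding required fields
for _, tag := range []int{1, 3, 1} { // tag 1 arrives twice on the wire
	if tag <= 64 {
		mask := uint64(1) << uint64(tag-1)
		if reqFields&mask == 0 { // count only the first sighting
			reqFields |= mask
			required--
		}
	}
}
fmt.Println(required) // 0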
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
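The packed decoders above all parse the same shape: a single key with wire type bytes, a varint byte length, then the values back to back with no per-element keys. Building that layout by hand with the Buffer primitives (field number 4 and the values are arbitrary choices for illustration):

b := proto.NewBuffer(nil)
_ = b.EncodeVarint(uint64(4<<3 | proto.WireBytes)) // one key for the whole repeated field
_ = b.EncodeVarint(3)                              // payload length in bytes
_ = b.EncodeVarint(3)                              // []int32{3, 270}, varints back to back
_ = b.EncodeVarint(270)
fmt.Printf("% x\n", b.Bytes()) // 22 03 03 8e 02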
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
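The restricted wire format dec_new_map parses is an ordinary embedded message with the key on field 1 and the value on field 2, one such message per map entry. Hand-rolling a single entry of a hypothetical map<string, int32> on field number 5 shows the shape:

entry := proto.NewBuffer(nil)
_ = entry.EncodeVarint(uint64(1<<3 | proto.WireBytes)) // key: field 1, wire type bytes
_ = entry.EncodeStringBytes("hi")
_ = entry.EncodeVarint(uint64(2<<3 | proto.WireVarint)) // value: field 2, varint
_ = entry.EncodeVarint(7)

msg := proto.NewBuffer(nil)
_ = msg.EncodeVarint(uint64(5<<3 | proto.WireBytes)) // the map field itself
_ = msg.EncodeRawBytes(entry.Bytes())
fmt.Printf("% x\n", msg.Bytes()) // 2a 06 0a 02 68 69 10 07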
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 000000000..dea2617ce --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b22..3abfed2cf 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -37,28 +37,9 @@ package proto import ( "errors" - "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. 
@@ -82,10 +63,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +96,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 } - return n + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. 
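The rewritten SizeVarint replaces the shift loop with a threshold ladder (one byte per 7 bits of magnitude), and the zigzag encoders keep small negative numbers small on the wire. Both are easy to sanity-check:

fmt.Println(proto.SizeVarint(127), proto.SizeVarint(128)) // 1 2: crossing the 1<<7 threshold adds a byte
for _, v := range []int64{0, -1, 1, -2, 2} {
	fmt.Print((uint64(v)<<1)^uint64(v>>63), " ") // zigzag: 0 1 2 3 4
}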
func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
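
The enc_len_thing helper below, which this patch removes along with the rest of the reflection-based encoder, appends four placeholder bytes, encodes the message, and then slides the encoded bytes so the actual varint-encoded length fits exactly. A minimal self-contained sketch of that reserve-and-shift idea, assuming the length fits in four varint bytes (the real code also grows the buffer when it does not; lenPrefix and its names are illustrative, not part of the library):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // lenPrefix prepends the varint length of payload using the
    // reserve-and-shift technique: reserve four bytes, append the
    // body, then slide it left over the unused reserved bytes.
    func lenPrefix(payload []byte) []byte {
        buf := append(make([]byte, 0, 4+len(payload)), 0, 0, 0, 0)
        iMsg := len(buf)
        buf = append(buf, payload...) // stands in for encoding the message
        var scratch [binary.MaxVarintLen64]byte
        lLen := binary.PutUvarint(scratch[:], uint64(len(payload)))
        copy(buf[lLen:], buf[iMsg:]) // shift the body next to the length
        buf = buf[:lLen+len(payload)]
        copy(buf, scratch[:lLen]) // write the length into the reserved space
        return buf
    }

    func main() {
        fmt.Printf("%x\n", lenPrefix([]byte("abc"))) // prints 03616263
    }
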
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf596..d4db5a1c1 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. 
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad21831..816a3b9d6 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
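
As a usage sketch of the contract documented above: desc stands for a generated *ExtensionDesc (for example a hypothetical pb.E_MyExtension) whose ExtensionType is *string; only proto.GetExtension, proto.Message, proto.ExtensionDesc, and proto.ErrMissingExtension are real package identifiers here.

    package example

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // readStringExtension shows the intended calling pattern for GetExtension.
    func readStringExtension(m proto.Message, desc *proto.ExtensionDesc) {
        v, err := proto.GetExtension(m, desc)
        switch {
        case err == proto.ErrMissingExtension:
            fmt.Println("extension not set and no default declared")
        case err != nil:
            fmt.Println("cannot read extension:", err)
        default:
            // With a type-complete descriptor, the result has the
            // concrete type of desc.ExtensionType, here *string.
            fmt.Println("value:", *v.(*string))
        }
    }
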
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. 
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504a..75565cc6d 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -273,6 +273,67 @@ import ( "sync" ) +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +370,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. 
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
+ deterministic bool
}

// NewBuffer allocates a new Buffer and initializes its internal data to
@@ -343,6 +395,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }

+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
+
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
@@ -831,22 +907,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil
}

+// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
+ s := mapKeySorter{vs: vs}

- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
@@ -855,6 +921,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}

return s
@@ -895,3 +967,13 @@ const ProtoPackageIsVersion2 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index fd982decd..3b6ca41d5 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -42,6 +42,7 @@ import (
 "fmt"
 "reflect"
 "sort"
+ "sync"
)

// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
}

func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
+ return ms.find(pb) != nil
}

func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
+ return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements MarshalMessageSet above, with an option to turn deterministic encoding on or off during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
 switch exts := exts.(type) {
 case *XXX_InternalExtensions:
- if err := encodeExtensions(exts); err != nil {
- return nil, err
- }
- m, _ = exts.extensionsRead()
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
 case map[int32]Extension:
- if err := encodeExtensionsMap(exts); err != nil {
- return nil, err
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
 }
- m = exts
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
 default:
 return nil, errors.New("proto: not an extension map")
 }
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
}

// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e1..b6cad9083 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} +const unsafeAllowed = false -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. 
- if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. 
+type pointer struct { v reflect.Value } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { return p.v.IsNil() } -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. 
-func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() if n < m { - p.v.SetLen(n + 1) + s.SetLen(n + 1) } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) } -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) } - panic("unreachable") + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) return } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) } - panic("unreachable") + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) } - panic("unreachable") + p.v.Elem().Set(s) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func (p word64Slice) Len() int { - return p.v.Len() +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v } -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47..d55a335d9 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. +const unsafeAllowed = true -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. 
-func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +// valToPointer converts v to a pointer. 
v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) isNil() bool { + return p.p == nil } -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -func word64_IsNil(p word64) bool { - return *p == nil +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -func word64_Get(p word64) uint64 { - return **p +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
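pointer.offset above is bare address arithmetic: a field identifier is nothing but the field's byte offset from the struct base, so adding it to the base pointer lands on the field. The same pattern, written out standalone over a hypothetical struct:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B string
}

func main() {
	v := example{A: 1, B: "hi"}
	sf, _ := reflect.TypeOf(v).FieldByName("B")
	// field == byte offset; base + offset == address of the field,
	// just as pointer.offset computes above.
	pb := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + sf.Offset))
	fmt.Println(*pb) // prints hi
}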
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func word64Val_Get(p word64Val) uint64 { - return *p +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. 
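getPointerSlice and setPointerSlice above alias a []*T as a []pointer; that is sound only because both slices have one-word elements under an identical slice header. A hedged illustration with a hypothetical element type:

package main

import (
	"fmt"
	"unsafe"
)

type node struct{ v int }

func main() {
	s := []*node{{v: 1}, {v: 2}}
	// *node and unsafe.Pointer are each a single word, so the slice
	// header of a []*node can be reinterpreted in place -- the same
	// aliasing that getPointerSlice/setPointerSlice rely on.
	ps := *(*[]unsafe.Pointer)(unsafe.Pointer(&s))
	fmt.Println((*node)(ps[0]).v, (*node)(ps[1]).v) // prints 1 2
}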
+func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c00..50b99b83a 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. 
-type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -182,41 +139,24 @@ type Properties struct { Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only + proto3 bool // whether this is known to be a proto3 field oneof bool // whether this is a oneof field Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only } // String formats the properties in the protobuf struct field tag style. 
func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,256 +252,41 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. -func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. 
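Parse above consumes the generated protobuf struct tag, a comma-separated list of wire encoding, field number, presence, and trailing options such as name= and def= (with def= always last, since defaults may contain unescaped commas). A hypothetical generated field, to make the shape concrete:

// Illustrative only; Parse would record Wire="bytes", WireType=WireBytes,
// Tag=1, and the name option for this field.
type Person struct {
	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}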
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. 
vtype = reflect.PtrTo(vtype) } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. 
@@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. 
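RegisterType above keeps typed nil pointers rather than reflect.Types: a typed nil still carries its dynamic type across an interface boundary, so the type can be recovered later with reflect.TypeOf. A quick illustration (hypothetical message type):

package main

import (
	"fmt"
	"reflect"
)

type MyMsg struct{}

func main() {
	var m *MyMsg // typed nil, as generated code passes to RegisterType
	var i interface{} = m
	fmt.Println(reflect.TypeOf(i)) // prints *main.MyMsg
	fmt.Println(i == nil)          // prints false: the interface still holds a type word
}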
+func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..b16794496 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2767 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. 
+type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. 
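size and marshal above lazily build their tables through the classic double-checked pattern: an atomic flag on the fast path, a recheck under the mutex, and an atomic store once the data is ready, exactly as computeMarshalInfo does. A minimal standalone version with hypothetical names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazyInfo struct {
	sync.Mutex
	initialized int32
	data        []string
}

func (l *lazyInfo) get() []string {
	// Fast path: an atomic read avoids taking the lock once initialized.
	if atomic.LoadInt32(&l.initialized) == 0 {
		l.compute()
	}
	return l.data
}

func (l *lazyInfo) compute() {
	l.Lock()
	defer l.Unlock()
	if l.initialized != 0 { // plain read is fine under the lock
		return
	}
	l.data = []string{"computed"}
	atomic.StoreInt32(&l.initialized, 1) // publish only after data is set
}

func main() {
	l := &lazyInfo{}
	fmt.Println(l.get()) // prints [computed]
}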
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. 
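setTag and wiretype above precompute each field's wire key, (field_number << 3) | wire_type, along with the varint-encoded size of that key. Worked through for a hypothetical field number 49 of wire type bytes:

package main

import "fmt"

func main() {
	const tag = 49
	const wireBytes = 2 // WireBytes in the package's wire-type constants
	// 49<<3 = 392; 392|2 = 394, which encodes as a two-byte varint.
	key := uint64(tag)<<3 | uint64(wireBytes)
	fmt.Printf("wiretag=%d (%#x)\n", key, key) // prints wiretag=394 (0x18a)
}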
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, 
appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 
+ tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s 
{ + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + 
SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
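+ // Varint encoding stores 7 payload bits per byte, least-significant group + // first; the high bit of every byte except the last is a continuation flag. + // For example, 300 (0b100101100) encodes as 0xAC 0x02.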
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
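+ // The NoZero variants implement proto3 scalar semantics: a field that + // holds its zero value is omitted from the output entirely.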
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
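+// Each element is written as the field tag, a varint length, and the +// message body; a nil element cannot be represented on the wire and is +// reported as errRepeatedHasNil.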
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If the value is pointer-typed, the interface is a direct interface and + // the idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If the value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use the + // cached version in marshal (but not in size). + // If the value is not a message type, we don't have a size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache.
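+ // The cache is valid here because the size pass runs before marshal + // and has already recorded each nested message's size.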
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
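+// When more than one extension is present, they are written in ascending +// field-number order so that the encoding is deterministic.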
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
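+ // e.enc was encoded with the extension's regular tag; it is re-emitted + // below under the message-set message field (tag 3) instead.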
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 000000000..5525def6a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src are of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value.
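+ // This fast path inspects raw field memory (e.g. a slice header's data + // pointer), so it is only taken with the unsafe-based pointer implementation.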
+ if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceHeader or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int32{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..ebf1caa56 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2051 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
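A note on the caching in the Unmarshal entry point above, before the call that follows: an atomic load is tried first, and only on a miss does the code fall back to a mutex-guarded computation, so steady-state unmarshaling takes no locks at all. The standalone sketch below shows the same two-level cache under assumed names (typeInfo, getInfo, cachedInfo are illustrative, not the library's helpers):

package main

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
)

type typeInfo struct{ numFields int }

// Slow path: a global mutex-guarded map ensures the expensive
// per-type computation happens at most once per message type.
var (
	infoLock sync.Mutex
	infoMap  = map[reflect.Type]*typeInfo{}
)

func getInfo(t reflect.Type) *typeInfo {
	infoLock.Lock()
	defer infoLock.Unlock()
	if i, ok := infoMap[t]; ok {
		return i
	}
	i := &typeInfo{numFields: t.NumField()}
	infoMap[t] = i
	return i
}

// Fast path: each message value carries an atomic slot, so repeat
// unmarshals of the same type cost one atomic load and no locks.
type cachedInfo struct {
	slot atomic.Value // always stores a *typeInfo
}

func (c *cachedInfo) load(t reflect.Type) *typeInfo {
	if i, _ := c.slot.Load().(*typeInfo); i != nil {
		return i
	}
	i := getInfo(t)
	c.slot.Store(i) // publish for subsequent calls
	return i
}

func main() {
	var c cachedInfo
	type msg struct{ A, B int }
	fmt.Println(c.load(reflect.TypeOf(msg{})).numFields) // prints 2
}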
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when reqFields is fully populated
+	unrecognized    field                         // offset of []byte slot
+	extensions      field                         // offset of extensions slot (type XXX_InternalExtensions)
+	oldExtensions   field                         // offset of old-form extensions slot (map[int32]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, the extension ranges for this message
+	isMessageSet    bool                          // if true, implies extensionRanges is non-nil
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a struct of the given type.
+// This function is necessarily inefficient, but it is only called once per
+// message type.
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
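The dispatch loop above starts each iteration by reading one varint key and splitting it: field number = key >> 3, wire type = key & 7, with one- and two-byte keys special-cased for speed. Before the table construction continues below, a small self-contained illustration of that split on a hand-encoded buffer (the bytes are an assumed example, field 1 carrying the varint value 150):

package main

import "fmt"

func main() {
	// 0x08 = (field 1 << 3) | wire type 0 (varint); 0x96 0x01 encodes 150.
	b := []byte{0x08, 0x96, 0x01}
	key := uint64(b[0]) // a single-byte varint in this example
	fieldNum := key >> 3
	wireType := key & 7
	// Two-byte varint payload: low 7 bits first, high bits next.
	val := uint64(b[1]&0x7f) | uint64(b[2])<<7
	fmt.Println(fieldNum, wireType, val) // prints: 1 0 150
}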
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
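Stepping back from the oneof matching that continues below: computeUnmarshalInfo drives everything off the `protobuf` struct tag emitted by the code generator, where the first element is the wire encoding, the second the tag number, the third req/opt/rep, and later elements carry options such as name=... and proto3. A runnable sketch of the same parsing, using an assumed example tag string:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	tag := "bytes,2,opt,name=value,proto3" // representative generated tag
	parts := strings.Split(tag, ",")
	num, err := strconv.Atoi(parts[1]) // field/tag number
	if err != nil {
		panic(err)
	}
	name := ""
	for _, p := range parts[3:] { // options follow the first three slots
		if strings.HasPrefix(p, "name=") {
			name = strings.TrimPrefix(p, "name=")
		}
	}
	fmt.Println(parts[0], num, parts[2], name) // prints: bytes 2 opt value
}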
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
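An aside on setTag above and the matching lookup in the unmarshal loop: small tag numbers land in a dense slice, so the common case is a bounds check plus a slice index, while large or sparse tags spill into a map. A minimal sketch of that two-level table with illustrative types (the cutoff of 16 mirrors the code above; everything else is assumed):

package main

import "fmt"

type table struct {
	dense  []string          // tags below the cutoff, indexed directly
	sparse map[uint64]string // everything else
}

func (t *table) set(tag int, v string) {
	if tag >= 0 && tag < 16 {
		for len(t.dense) <= tag {
			t.dense = append(t.dense, "")
		}
		t.dense[tag] = v
		return
	}
	if t.sparse == nil {
		t.sparse = map[uint64]string{}
	}
	t.sparse[uint64(tag)] = v
}

func (t *table) get(tag uint64) string {
	if tag < uint64(len(t.dense)) {
		return t.dense[tag] // hot path: no hashing
	}
	return t.sparse[tag]
}

func main() {
	var t table
	t.set(1, "name")
	t.set(1000, "ext")
	fmt.Println(t.get(1), t.get(1000)) // prints: name ext
}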
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
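These unmarshalers all reduce to two primitives: base-128 varint decoding, and, for the sint32/sint64 encodings, the zigzag transform seen above as int64(x>>1) ^ int64(x)<<63>>63, which maps 0, 1, 2, 3, ... back to 0, -1, 1, -2, ... The sketch below is a simplified stand-in for decodeVarint (no 64-bit overflow checks), plus the equivalent zigzag identity:

package main

import "fmt"

// decode reads a base-128 varint: 7 payload bits per byte, high bit
// marks continuation. Returns (0, 0) on truncated input, matching
// the error convention of decodeVarint above.
func decode(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << uint(7*i)
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	x, n := decode([]byte{0x96, 0x01}) // 150 encoded in two bytes
	fmt.Println(x, n)                  // prints: 150 2
	// Zigzag: u>>1 ^ -(u&1) is the same transform as the shift form above.
	for _, u := range []uint64{0, 1, 2, 3, 4} {
		fmt.Print(int64(u>>1)^-int64(u&1), " ") // prints: 0 -1 1 -2 2
	}
	fmt.Println()
}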
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf0..1aaee725b 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -355,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -372,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.MapValProp); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). 
pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f2..bb55a3af2 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := 
unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -644,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err := p.consumeToken(":"); err != nil { return err } - if err := p.readAny(key, props.mkeyprop); err != nil { + if err := p.readAny(key, props.MapKeyProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { return err } - if err := p.readAny(val, props.mvalprop); err != nil { + if err := p.readAny(val, props.MapValProp); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000..e855b1f5c --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2812 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/protobuf/descriptor.proto + +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
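+// As a hedged illustration (the method and message names are hypothetical),
+// the level is declared per method in a .proto file:
+//
+//   rpc GetFoo(GetFooRequest) returns (GetFooResponse) {
+//     option idempotency_level = NO_SIDE_EFFECTS;
+//   }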
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m 
*FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) 
GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
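+// As an illustration (the field numbers are hypothetical): "reserved 9 to 11;"
+// in a message yields a ReservedRange with start=9 and end=12, since end is
+// exclusive here, unlike EnumDescriptorProto.EnumReservedRange below.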
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
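+	// As a sketch of the mechanism (my_opt is a hypothetical extension): a
+	// custom option written as "option (my_opt) = 42;" is carried here as an
+	// UninterpretedOption whose name is the extension name and whose
+	// positive_int_value is 42, until the compiler resolves it against the
+	// extension's declared type.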
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
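+	// For example (the names are illustrative): a declaration
+	//     reserved "FOO", "BAR";
+	// inside an enum body populates this list with "FOO" and "BAR".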
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
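+// Illustration (the values are hypothetical): "reserved 8 to 10;" in an enum
+// yields an EnumReservedRange with start=8 and end=10, both inclusive.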
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
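+// As a rough sketch (the names are hypothetical), a definition such as
+//
+//   service Greeter {
+//     rpc SayHello(HelloRequest) returns (HelloReply);
+//   }
+//
+// is described by a ServiceDescriptorProto with name "Greeter" and a single
+// MethodDescriptorProto entry in method.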
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
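+	// For the hypothetical SayHello above, declared in a package mypkg,
+	// input_type would typically be ".mypkg.HelloRequest": the leading '.'
+	// marks a fully-qualified name, as with FieldDescriptorProto.type_name.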
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file. Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname. However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language? "Generic" services
+	// are not specific to any particular RPC system. They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system. Therefore,
+	// these default to false. Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. 
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (dst *MessageOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MessageOptions.Merge(dst, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+ return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64-bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript
+ // value. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+ // code to use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; in the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
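+ // An illustrative .proto usage of the options above (a hedged sketch,
+ // not part of this generated file; the field names and the BigMessage
+ // type are hypothetical):
+ //
+ //	repeated int32 samples = 4 [packed = true];
+ //	optional BigMessage payload = 5 [lazy = true];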
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
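+ // An illustrative .proto usage of allow_alias (a hedged sketch, not part
+ // of this generated file; the enum and its values are hypothetical):
+ //
+ //	enum Status {
+ //	 option allow_alias = true;
+ //	 STATUS_UNKNOWN = 0;
+ //	 STATUS_UNSET = 0; // an alias sharing the same number
+ //	}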
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
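+ // An illustrative .proto usage (a hedged sketch, not part of this
+ // generated file; the value name is hypothetical), inside an enum
+ // definition:
+ //
+ //	STATUS_LEGACY = 2 [deprecated = true];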
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
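+ // An illustrative .proto usage of idempotency_level (a hedged sketch,
+ // not part of this generated file; the rpc and message names are
+ // hypothetical):
+ //
+ //	rpc GetFeature(GetFeatureRequest) returns (Feature) {
+ //	 option idempotency_level = NO_SIDE_EFFECTS;
+ //	}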
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (dst *MethodOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MethodOptions.Merge(dst, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+ return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+ if m != nil && m.IdempotencyLevel != nil {
+ return *m.IdempotencyLevel
+ }
+ return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
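+//
+// As an illustrative Go sketch (not generated code), that example name can
+// be built with the proto.String and proto.Bool pointer helpers:
+//
+//	name := []*UninterpretedOption_NamePart{
+//		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
+//		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
+//		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
+//	}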
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19}
+}
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceCodeInfo.Merge(dst, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+ return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+ return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition occurs. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
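+ // An illustrative reading (not generated text): a span of
+ // [ 3, 4, 3, 21 ] starts at line 3, column 4 and ends at line 3,
+ // column 21 zero-based, i.e. line 4, columns 5 to 22 once 1 is added
+ // to each number for display.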
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m 
*SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
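+ // An illustrative note (not generated text): in Go slice terms, the
+ // annotated region of the generated file's contents is
+ // contents[begin:end], since end is one past the last relevant byte.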
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + 
proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} + +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 
0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 
0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 
0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 
0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index b2af97f4a..70276e8f5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb 
proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f34601723..e3c56d3ff 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -130,16 +121,38 @@ type Any struct { // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a098..a7beb2c41 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -91,21 +82,43 @@ type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", 
fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..a69b403ce --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package empty // import "github.com/golang/protobuf/ptypes/empty" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_empty_39e6d6db0632e5b2, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) } + +var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 
0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..ee6382e14 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb // import "github.com/golang/protobuf/ptypes/struct" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
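+	// For example (an illustrative sketch): the JSON object {"name": "x", "n": 1}
+	// maps to a Fields entry "name" holding a StringValue "x" and an entry
+	// "n" holding a NumberValue 1.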
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Struct) Reset()         { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage()    {}
+func (*Struct) Descriptor() ([]byte, []int) {
+	return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (dst *Struct) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Struct.Merge(dst, src)
+}
+func (m *Struct) XXX_Size() int {
+	return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+	xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	// The kind of value.
+	//
+	// Types that are valid to be assigned to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Value) Reset()         { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()    {}
+func (*Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_struct_3a5a94e0c7801b27, []int{1}
+}
+func (*Value) XXX_WellKnownType() string { return "Value" }
+func (m *Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (dst *Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Value.Merge(dst, src)
+}
+func (m *Value) XXX_Size() int {
+	return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	StructValue *Struct
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes 
{ + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += 1 // tag and wire + n += 8 + case *Value_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 1 // tag and wire + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. 
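+	// For example (an illustrative sketch): the JSON array [1, "x"] maps to
+	// Values holding a NumberValue 1 followed by a StringValue "x".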
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} + +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 
0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e23e4a25d..8e76ae976 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // @@ -109,19 +100,41 @@ type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
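	// For example (an illustrative sketch): the instant 1.5 seconds before the
	// epoch is represented as Seconds: -2, Nanos: 500000000, since nanos always
	// count forward from the seconds value.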
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 000000000..0f0fa837f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} + +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 
0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 000000000..6d16b6578 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go new file mode 100644 index 000000000..7b621643e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/call_option.go @@ -0,0 +1,157 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+	// Resolve applies the option by modifying cs.
+	Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with nil error.
+	Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+	s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+	return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+	return &boRetryer{
+		backoff: bo,
+		codes:   append([]codes.Code(nil), cc...),
+	}
+}
+
+type boRetryer struct {
+	backoff Backoff
+	codes   []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+	st, ok := status.FromError(err)
+	if !ok {
+		return 0, false
+	}
+	c := st.Code()
+	for _, rc := range r.codes {
+		if c == rc {
+			return r.backoff.Pause(), true
+		}
+	}
+	return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+	// Initial is the initial value of the retry envelope, defaults to 1 second.
+	Initial time.Duration
+
+	// Max is the maximum value of the retry envelope, defaults to 30 seconds.
+	Max time.Duration
+
+	// Multiplier is the factor by which the retry envelope increases.
+	// It should be greater than 1 and defaults to 2.
+	Multiplier float64
+
+	// cur is the current retry envelope
+	cur time.Duration
+}
+
+func (bo *Backoff) Pause() time.Duration {
+	if bo.Initial == 0 {
+		bo.Initial = time.Second
+	}
+	if bo.cur == 0 {
+		bo.cur = bo.Initial
+	}
+	if bo.Max == 0 {
+		bo.Max = 30 * time.Second
+	}
+	if bo.Multiplier < 1 {
+		bo.Multiplier = 2
+	}
+	// Select a duration between zero and the current max. It might seem counterintuitive to
+	// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
+	// argues that that is the best strategy.
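+	// For example (an illustrative sketch, assuming the zero value):
+	//
+	//	bo := gax.Backoff{}  // defaults apply: Initial 1s, Max 30s, Multiplier 2
+	//	d1 := bo.Pause()     // uniform in [0, 1s); envelope grows to 2s
+	//	d2 := bo.Pause()     // uniform in [0, 2s); envelope grows to 4s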
+ d := time.Duration(rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go new file mode 100644 index 000000000..5ebedff0d --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/gax.go @@ -0,0 +1,40 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +// +// This project is currently experimental and not supported. +package gax + +const Version = "0.1.0" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go new file mode 100644 index 000000000..d81455ecc --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/header.go @@ -0,0 +1,24 @@ +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
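+//
+// For example (an illustrative sketch, using the Version constant from gax.go):
+//
+//	XGoogHeader("gl-go", "1.10.3", "gax", Version)
+//	// returns "gl-go/1.10.3 gax/0.1.0"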
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go new file mode 100644 index 000000000..86049d826 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/invoke.go @@ -0,0 +1,90 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "time" + + "golang.org/x/net/context" +) + +// A user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. 
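+// A sketch of a typical call through the exported Invoke (the RPC stub and
+// retry codes below are illustrative, not part of this package):
+//
+//	err := Invoke(ctx, func(ctx context.Context, cs CallSettings) error {
+//		return stub.Ping(ctx, req, cs.GRPC...) // hypothetical gRPC stub
+//	}, WithRetry(func() Retryer {
+//		return OnCodes([]codes.Code{codes.Unavailable}, Backoff{})
+//	}))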
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+	var retryer Retryer
+	for {
+		err := call(ctx, settings)
+		if err == nil {
+			return nil
+		}
+		if settings.Retry == nil {
+			return err
+		}
+		if retryer == nil {
+			if r := settings.Retry(); r != nil {
+				retryer = r
+			} else {
+				return err
+			}
+		}
+		if d, ok := retryer.Retry(err); !ok {
+			return err
+		} else if err = sp(ctx, d); err != nil {
+			return err
+		}
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 000000000..b03310a91
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..9cfa988bc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
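+//
+// For example, with data obtained by unmarshaling the JSON document
+// {"foo": {"bar": "baz"}} into an interface{}, Search("foo.bar", data)
+// returns "baz". The document and expression here are illustrative.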
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
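+		// The ordering reported after an error is arbitrary; callers such
+		// as sort_by inspect hasError once sorting finishes and discard
+		// the result if it is set.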
+		return true
+	}
+	ith, ok := first.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	second, err := a.intr.Execute(a.node, a.items[j])
+	if err != nil {
+		a.hasError = true
+		// Return a dummy value.
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": {
+			name: "length",
+			arguments: []argSpec{
+				{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": {
+			name: "starts_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": {
+			name: "abs",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": {
+			name: "avg",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": {
+			name: "ceil",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": {
+			name: "contains",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": {
+			name: "ends_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": {
+			name: "floor",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": {
+			name: "map",
+			arguments: []argSpec{
+				{types: []jpType{jpExpref}},
+				{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": {
+			name: "max",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": {
+			name: "merge",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": {
+			name: "max_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": {
+			name: "sum",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": {
+			name: "min",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": {
+			name: "min_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMinBy,
+			hasExpRef: true,
+		},
+		"type": {
+			name: "type",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfType,
+		},
+		"keys": {
+			name: "keys",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfKeys,
+		},
+		"values": {
+			name: "values",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfValues,
+		},
+		"sort": {
+			name: "sort",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayString, jpArrayNumber}},
+			},
+			handler: jpfSort,
+		},
+		"sort_by": {
+			name: "sort_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfSortBy,
+			hasExpRef: true,
+		},
+		"join": {
+			name: "join",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpArrayString}},
+			},
+			handler: jpfJoin,
+		},
+		"reverse": {
+			name: "reverse",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+			},
+			handler: jpfReverse,
+		},
+		"to_array": {
+
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
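+	// (A decoded JSON array is an []interface{} even when every element is
+	// a string, so a direct assertion to []string would always fail.)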
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
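+//
+// For example, executing the AST for the expression "foo" against
+// map[string]interface{}{"foo": 1.0} returns 1.0. The expression and data
+// here are illustrative.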
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
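+		// Reflection covers user-provided slice types such as []string or
+		// []MyStruct that cannot be type-asserted to []interface{}.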
+		rv := reflect.ValueOf(value)
+		if rv.Kind() == reflect.Slice {
+			index := node.value.(int)
+			if index < 0 {
+				index += rv.Len()
+			}
+			if index < rv.Len() && index >= 0 {
+				v := rv.Index(index)
+				return v.Interface(), nil
+			}
+		}
+		return nil, nil
+	case ASTKeyValPair:
+		return intr.Execute(node.children[0], value)
+	case ASTLiteral:
+		return node.value, nil
+	case ASTMultiSelectHash:
+		if value == nil {
+			return nil, nil
+		}
+		collected := make(map[string]interface{})
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			key := child.value.(string)
+			collected[key] = current
+		}
+		return collected, nil
+	case ASTMultiSelectList:
+		if value == nil {
+			return nil, nil
+		}
+		collected := []interface{}{}
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			collected = append(collected, current)
+		}
+		return collected, nil
+	case ASTOrExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			matched, err = intr.Execute(node.children[1], value)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return matched, nil
+	case ASTAndExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return matched, nil
+		}
+		return intr.Execute(node.children[1], value)
+	case ASTNotExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return true, nil
+		}
+		return false, nil
+	case ASTPipe:
+		result := value
+		var err error
+		for _, child := range node.children {
+			result, err = intr.Execute(child, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return result, nil
+	case ASTProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.projectWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		collected := []interface{}{}
+		var current interface{}
+		for _, element := range sliceType {
+			current, err = intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	case ASTSubexpression, ASTIndexExpression:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		return intr.Execute(node.children[1], left)
+	case ASTSlice:
+		sliceType, ok := value.([]interface{})
+		if !ok {
+			if isSliceType(value) {
+				return intr.sliceWithReflection(node, value)
+			}
+			return nil, nil
+		}
+		parts := node.value.([]*int)
+		sliceParams := make([]sliceParam, 3)
+		for i, part := range parts {
+			if part != nil {
+				sliceParams[i].Specified = true
+				sliceParams[i].N = *part
+			}
+		}
+		return slice(sliceType, sliceParams)
+	case ASTValueProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		mapType, ok := left.(map[string]interface{})
+		if !ok {
+			return nil, nil
+		}
+		values := make([]interface{}, 0, len(mapType))
+		for _, value := range mapType {
+			values = append(values, value)
+		}
+		collected := []interface{}{}
+		for _, element := range values {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	}
+	return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+	rv := reflect.ValueOf(value)
+	first, n := utf8.DecodeRuneInString(key)
+	fieldName := string(unicode.ToUpper(first)) + key[n:]
+	if rv.Kind() == reflect.Struct {
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	} else if rv.Kind() == reflect.Ptr {
+		// Handle multiple levels of indirection?
+		if rv.IsNil() {
+			return nil, nil
+		}
+		rv = rv.Elem()
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	}
+	return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	flattened := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		if reflect.TypeOf(element).Kind() == reflect.Slice {
+			// Then insert the contents of the element
+			// slice into the flattened slice,
+			// i.e. flattened = append(flattened, mySlice...)
+			elementV := reflect.ValueOf(element)
+			for j := 0; j < elementV.Len(); j++ {
+				flattened = append(
+					flattened, elementV.Index(j).Interface())
+			}
+		} else {
+			flattened = append(flattened, element)
+		}
+	}
+	return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	parts := node.value.([]*int)
+	sliceParams := make([]sliceParam, 3)
+	for i, part := range parts {
+		if part != nil {
+			sliceParams[i].Specified = true
+			sliceParams[i].N = *part
+		}
+	}
+	final := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		final = append(final, element)
+	}
+	return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	compareNode := node.children[2]
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(compareNode, element)
+		if err != nil {
+			return nil, err
+		}
+		if !isFalse(result) {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+	}
+	return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(node.children[1], element)
+		if err != nil {
+			return nil, err
+		}
+		if result != nil {
+			collected = append(collected, result)
+		}
+	}
+	return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 000000000..817900c8f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type token struct {
+	tokenType tokType
+	value     string
+	position  int
+	length    int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+	expression string       // The expression provided by the user.
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune; used by back() to step back one rune.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+		t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+	lexer := Lexer{}
+	return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+	if lexer.currentPos >= len(lexer.expression) {
+		lexer.lastWidth = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+	lexer.lastWidth = w
+	lexer.currentPos += w
+	return r
+}
+
+func (lexer *Lexer) back() {
+	lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+	t := lexer.next()
+	lexer.back()
+	return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+	var tokens []token
+	lexer.expression = expression
+	lexer.currentPos = 0
+	lexer.lastWidth = 0
+loop:
+	for {
+		r := lexer.next()
+		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+			t := lexer.consumeUnquotedIdentifier()
+			tokens = append(tokens, t)
+		} else if val, ok := basicTokens[r]; ok {
+			// Basic single char token.
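+			// e.g. '.' becomes tDot and '@' becomes tCurrent; see the
+			// basicTokens map above.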
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
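+	// The first rune ('-' or a digit) was already consumed by tokenize, so
+	// start is moved back to include it.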
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..1240a1755 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
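+//
+// Parsing is Pratt-style: parseExpression keeps consuming infix (led) tokens
+// while their binding power (see bindingPowers above) exceeds the caller's.
+// For example, "a || b | c" parses as (a || b) | c because tOr (2) binds
+// tighter than tPipe (1).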
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
+	case tCurrent:
+		return ASTNode{nodeType: ASTCurrentNode}, nil
+	case tExpref:
+		expression, err := p.parseExpression(bindingPowers[tExpref])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+	case tNot:
+		expression, err := p.parseExpression(bindingPowers[tNot])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+	case tLparen:
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return expression, nil
+	case tEOF:
+		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+	}
+
+	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+	var expressions []ASTNode
+	for {
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		expressions = append(expressions, expression)
+		if p.current() == tRbracket {
+			break
+		}
+		err = p.match(tComma)
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+	err := p.match(tRbracket)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectList,
+		children: expressions,
+	}, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+	var children []ASTNode
+	for {
+		keyToken := p.lookaheadToken(0)
+		if err := p.match(tUnquotedIdentifier); err != nil {
+			if err := p.match(tQuotedIdentifier); err != nil {
+				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+			}
+		}
+		keyName := keyToken.value
+		err := p.match(tColon)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		value, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		node := ASTNode{
+			nodeType: ASTKeyValPair,
+			value:    keyName,
+			children: []ASTNode{value},
+		}
+		children = append(children, node)
+		if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, err
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			break
+		}
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectHash,
+		children: children,
+	}, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+	indexExpr := ASTNode{
+		nodeType: ASTIndexExpression,
+		children: []ASTNode{left, right},
+	}
+	if right.nodeType == ASTSlice {
+		right, err := p.parseProjectionRHS(bindingPowers[tStar])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{indexExpr, right},
+		}, err
+	}
+	return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+	var right, condition ASTNode
+	var err error
+	condition, err = p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() == tFlatten {
+		right = ASTNode{nodeType: ASTIdentity}
+	} else {
+		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+
+	return ASTNode{
+		nodeType: ASTFilterProjection,
+		children: []ASTNode{node, right, condition},
+	}, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+	lookahead := p.current()
+	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+		return p.parseExpression(bindingPower)
+	} else if lookahead == tLbracket {
+		if err := p.match(tLbracket); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectList()
+	} else if lookahead == tLbrace {
+		if err := p.match(tLbrace); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectHash()
+	}
+	return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+	current := p.current()
+	if bindingPowers[current] < 10 {
+		return ASTNode{nodeType: ASTIdentity}, nil
+	} else if current == tLbracket {
+		return p.parseExpression(bindingPower)
+	} else if current == tFilter {
+		return p.parseExpression(bindingPower)
+	} else if current == tDot {
+		err := p.match(tDot)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseDotRHS(bindingPower)
+	} else {
+		return ASTNode{}, p.syntaxError("Unexpected token in projection: " + current.String())
+	}
+}
+
+func (p *Parser) lookahead(number int) tokType {
+	return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+	return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+	return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+	p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+	for _, elem := range elements {
+		if elem == token {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     p.lookaheadToken(0).position,
+	}
+}
+
+// syntaxErrorToken creates a SyntaxError based on the provided token.
+// This differs from syntaxError(), which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     t.position,
+	}
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 000000000..dae79cbdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
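For orientation, a minimal sketch (not part of the patch) of how this Pratt-style parser is typically driven through the package's exported entry points; NewParser is assumed to be the constructor defined earlier in parser.go, Search is the package's one-shot helper, and the expression and sample data below are purely illustrative:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/jmespath/go-jmespath"
    )

    func main() {
    	// Build the AST only, using the Parser shown above.
    	parser := jmespath.NewParser()
    	ast, err := parser.Parse("foo[*].bar")
    	if err != nil {
    		panic(err) // a SyntaxError carries Expression and Offset
    	}
    	fmt.Printf("%#v\n", ast) // dumps the node tree (a projection over foo)

    	// Or tokenize, parse, and evaluate in a single call.
    	var data interface{}
    	_ = json.Unmarshal([]byte(`{"foo": [{"bar": 1}, {"bar": 2}]}`), &data)
    	result, err := jmespath.Search("foo[*].bar", data)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(result) // [1 2]
    }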
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 000000000..ddc1b7d7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,180 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+	switch v := value.(type) {
+	case bool:
+		return !v
+	case []interface{}:
+		return len(v) == 0
+	case map[string]interface{}:
+		return len(v) == 0
+	case string:
+		return len(v) == 0
+	case nil:
+		return true
+	}
+	// Try the reflection cases before returning false.
+	rv := reflect.ValueOf(value)
+	switch rv.Kind() {
+	case reflect.Struct:
+		// A struct type will never be false, even if
+		// all of its values are the zero type.
+		return false
+	case reflect.Slice, reflect.Map:
+		return rv.Len() == 0
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return true
+		}
+		// If it's a pointer type, we'll try to deref the pointer
+		// and evaluate the pointer value for isFalse.
+		element := rv.Elem()
+		return isFalse(element.Interface())
+	}
+	return false
+}
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+	return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+	N         int
+	Specified bool
+}
+
+// slice implements the [start:stop:step] style slicing supported by JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+	computed, err := computeSliceParams(len(slice), parts)
+	if err != nil {
+		return nil, err
+	}
+	start, stop, step := computed[0], computed[1], computed[2]
+	result := []interface{}{}
+	if step > 0 {
+		for i := start; i < stop; i += step {
+			result = append(result, slice[i])
+		}
+	} else {
+		for i := start; i > stop; i += step {
+			result = append(result, slice[i])
+		}
+	}
+	return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+	var start, stop, step int
+	if !parts[2].Specified {
+		step = 1
+	} else if parts[2].N == 0 {
+		return nil, errors.New("Invalid slice, step cannot be 0")
+	} else {
+		step = parts[2].N
+	}
+	stepValueNegative := step < 0
+
+	if !parts[0].Specified {
+		if stepValueNegative {
+			start = length - 1
+		} else {
+			start = 0
+		}
+	} else {
+		start = capSlice(length, parts[0].N, step)
+	}
+
+	if !parts[1].Specified {
+		if stepValueNegative {
+			stop = -1
+		} else {
+			stop = length
+		}
+	} else {
+		stop = capSlice(length, parts[1].N, step)
+	}
+	return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+	if actual < 0 {
+		actual += length
+		if actual < 0 {
+			if step < 0 {
+				actual = -1
+			} else {
+				actual = 0
+			}
+		}
+	} else if actual >= length {
+		if step < 0 {
+			actual = length - 1
+		} else {
+			actual = length
+		}
+	}
+	return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+	// Is there a better way to do this with reflect?
+	if d, ok := data.([]interface{}); ok {
+		result := make([]float64, len(d))
+		for i, el := range d {
+			item, ok := el.(float64)
+			if !ok {
+				return nil, false
+			}
+			result[i] = item
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 000000000..5d8cb5b72 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..258c0636a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). 
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+	// Per AbstractParser#parsePartialDelimitedFrom with
+	// CodedInputStream#readRawVarint32.
+	var headerBuf [binary.MaxVarintLen32]byte
+	var bytesRead, varIntBytes int
+	var messageLength uint64
+	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+		if bytesRead >= len(headerBuf) {
+			return bytesRead, errInvalidVarint
+		}
+		// We have to read byte by byte here to avoid reading more bytes
+		// than required. Each read byte is appended to what we have
+		// read before.
+		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+		if newBytesRead == 0 {
+			if err != nil {
+				return bytesRead, err
+			}
+			// A Reader should not return (0, nil), but if it does,
+			// it should be treated as no-op (according to the
+			// Reader contract). So let's go on...
+			continue
+		}
+		bytesRead += newBytesRead
+		// Now present everything read so far to the varint decoder and
+		// see if a varint can be decoded already.
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+	}
+
+	messageBuf := make([]byte, messageLength)
+	newBytesRead, err := io.ReadFull(r, messageBuf)
+	bytesRead += newBytesRead
+	if err != nil {
+		return bytesRead, err
+	}
+
+	return bytesRead, proto.Unmarshal(messageBuf, m)
+}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 000000000..c318385cb
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
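For orientation, a sketch (not part of the patch) of how ReadDelimited pairs with WriteDelimited from encode.go, which follows, for a write-then-read round trip; dto.MetricFamily is used only as an illustrative message type:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	"github.com/matttproud/golang_protobuf_extensions/pbutil"
    	dto "github.com/prometheus/client_model/go"
    )

    func main() {
    	var buf bytes.Buffer

    	// Write one length-delimited record: a varint length prefix, then the body.
    	in := &dto.MetricFamily{Name: proto.String("example_total")}
    	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
    		panic(err)
    	}

    	// Read it back; ReadDelimited consumes exactly one record from the stream.
    	var out dto.MetricFamily
    	if _, err := pbutil.ReadDelimited(&buf, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.GetName()) // example_total
    }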
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 000000000..8fb59ad22
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+	"encoding/binary"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+	buffer, err := proto.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+
+	var buf [binary.MaxVarintLen32]byte
+	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+	sync, err := w.Write(buf[:encodedLength])
+	if err != nil {
+		return sync, err
+	}
+
+	n, err = w.Write(buffer)
+	return n + sync, err
+}
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
new file mode 100644
index 000000000..c5275d5ab
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md
@@ -0,0 +1,18 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Bernerd Schaefer
+* Björn Rabenstein
+* Daniel Bornkessel
+* Jeff Younker
+* Julius Volz
+* Matt T. Proud
+* Tobias Schmidt
+
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). 
+https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..623d3d83f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. 
+type selfCollector struct {
+	self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+	c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+	ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+	ch <- c.self
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 000000000..ee37949ad
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,172 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// encountered, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+	Metric
+	Collector
+
+	// Set is used to set the Counter to an arbitrary value. It is only used
+	// if you have to transfer a value from an external counter into this
+	// Prometheus metric. Do not use it for regular handling of a
+	// Prometheus counter (as it can be used to break the contract of
+	// monotonically increasing values).
+	//
+	// Deprecated: Use NewConstMetric to create a counter for an external
+	// value. A Counter should never be set.
+	Set(float64)
+	// Inc increments the counter by 1.
+	Inc()
+	// Add adds the given value to the counter. It panics if the value is <
+	// 0.
+	Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type counter struct {
+	value
+}
+
+func (c *counter) Add(v float64) {
+	if v < 0 {
+		panic(errors.New("counter cannot decrease in value"))
+	}
+	c.value.Add(v)
+}
+
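For orientation, a sketch (not part of the vendored file) of typical Counter and CounterVec usage; CounterVec is defined just below, and the metric names here are made up:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
    	requests := prometheus.NewCounterVec(
    		prometheus.CounterOpts{
    			Name: "http_requests_total",
    			Help: "Total HTTP requests, partitioned by status code and method.",
    		},
    		[]string{"code", "method"},
    	)
    	prometheus.MustRegister(requests)

    	// Each distinct label-value combination yields its own Counter.
    	requests.WithLabelValues("200", "GET").Inc()
    	requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(3)
    }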
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+	*MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &CounterVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			result := &counter{value: value{
+				desc:       desc,
+				valType:    CounterValue,
+				labelPairs: makeLabelPairs(desc, lvs),
+			}}
+			result.init(result) // Init self-collection.
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+	return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+	return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..77f4b30e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,205 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. 
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels,
+	}
+	if help == "" {
+		d.err = errors.New("empty help string")
+		return d
+	}
+	if !metricNameRE.MatchString(fqName) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, labelName := range variableLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = errors.New("duplicate label names")
+		return d
+	}
+	vh := hashNew()
+	for _, val := range labelValues {
+		vh = hashAdd(vh, val)
+		vh = hashAddByte(vh, separatorByte)
+	}
+	d.id = vh
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	lh := hashNew()
+	lh = hashAdd(lh, help)
+	lh = hashAddByte(lh, separatorByte)
+	for _, labelName := range labelNames {
+		lh = hashAdd(lh, labelName)
+		lh = hashAddByte(lh, separatorByte)
+	}
+	d.dimHash = lh
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(LabelPairSorter(d.constLabelPairs))
+	return d
+}
+
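For orientation, a sketch (not part of the vendored file) of constructing a Desc by hand and pairing it with NewConstMetric, as a custom Collector would inside its Collect method; the namespace, metric name, labels, and value below are illustrative:

    package main

    import (
    	"log"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	desc := prometheus.NewDesc(
    		prometheus.BuildFQName("myapp", "queue", "depth"), // "myapp_queue_depth"
    		"Current depth of the work queue.",
    		[]string{"queue"},               // variable label names
    		prometheus.Labels{"shard": "0"}, // constant labels
    	)
    	// As a custom Collector would do inside Collect:
    	m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 42, "default")
    	if err != nil {
    		// e.g. an invalid metric or label name recorded in the Desc.
    		log.Fatal(err)
    	}
    	_ = m // normally sent on the Collect channel
    }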
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		d.variableLabels,
+	)
+}
+
+func checkLabelName(l string) bool {
+	return labelNameRE.MatchString(l) &&
+		!strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 000000000..b15a2d3b9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,181 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow
+// exposing the registered metrics via HTTP (package promhttp) or pushing them
+// to a Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//    package main
+//
+//    import (
+//    	"net/http"
+//
+//    	"github.com/prometheus/client_golang/prometheus"
+//    	"github.com/prometheus/client_golang/prometheus/promhttp"
+//    )
+//
+//    var (
+//    	cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//    		Name: "cpu_temperature_celsius",
+//    		Help: "Current temperature of the CPU.",
+//    	})
+//    	hdFailures = prometheus.NewCounterVec(
+//    		prometheus.CounterOpts{
+//    			Name: "hd_errors_total",
+//    			Help: "Number of hard-disk errors.",
+//    		},
+//    		[]string{"device"},
+//    	)
+//    )
+//
+//    func init() {
+//    	// Metrics have to be registered to be exposed:
+//    	prometheus.MustRegister(cpuTemp)
+//    	prometheus.MustRegister(hdFailures)
+//    }
+//
+//    func main() {
+//    	cpuTemp.Set(65.3)
+//    	hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//    	// The Handler function provides a default handler to expose metrics
+//    	// via an HTTP server. "/metrics" is the usual endpoint for that.
+//    	http.Handle("/metrics", promhttp.Handler())
+//    	http.ListenAndServe(":8080", nil)
+//    }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is by far the most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 000000000..18a99d5fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 000000000..e3b67df8a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 000000000..8b70e5141
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,140 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1.
+	Inc()
+	// Dec decrements the Gauge by 1.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be
+	// negative, resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+	return newValue(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
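A minimal sketch of how the Gauge and GaugeVec types documented above are typically used together with the default registry; the metric names here are hypothetical, invented purely for illustration:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    var (
        // A plain Gauge for a single measured value.
        queueDepth = prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "queue_depth", // hypothetical metric name
            Help: "Current number of queued operations.",
        })
        // A GaugeVec partitioning the same measurement by two labels.
        opsQueued = prometheus.NewGaugeVec(
            prometheus.GaugeOpts{
                Name: "ops_queued", // hypothetical metric name
                Help: "Number of queued operations, partitioned by user and type.",
            },
            []string{"user", "type"},
        )
    )

    func main() {
        prometheus.MustRegister(queueDepth)
        prometheus.MustRegister(opsQueued)

        queueDepth.Set(3) // a Gauge can go up and down arbitrarily
        opsQueued.WithLabelValues("alice", "delete").Inc()
        opsQueued.With(prometheus.Labels{"user": "bob", "type": "put"}).Add(4)
    }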
+type GaugeVec struct {
+	*MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &GaugeVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newValue(desc, GaugeValue, 0, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+	return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 000000000..abc9d4ec4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"time"
+)
+
+type goCollector struct {
+	goroutines Gauge
+	gcDesc     *Desc
+
+	// metrics to describe and collect
+	metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
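Before the Go runtime collector below, a quick sketch of the NewGaugeFunc constructor from gauge.go above: the supplied callback is evaluated at collect time and, as its doc comment notes, must be concurrency-safe. The metric name is hypothetical:

    package main

    import (
        "runtime"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        gf := prometheus.NewGaugeFunc(
            prometheus.GaugeOpts{
                Name: "goroutines_current", // hypothetical metric name
                Help: "Current number of goroutines.",
            },
            // Called on every collection; must be safe for concurrent use.
            func() float64 { return float64(runtime.NumGoroutine()) },
        )
        prometheus.MustRegister(gf)
    }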
+func NewGoCollector() Collector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained by system. Sum of all system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + 
memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() + ch <- c.gcDesc + + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. 
+type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..9719e8fac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,444 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. 
The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Histogram. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this
+	// Histogram. Histograms with the same fully-qualified name must have the
+	// same label names in their ConstLabels.
+	//
+	// Note that in most cases, labels have a value that varies during the
+	// lifetime of a process. Those labels are usually managed with a
+	// HistogramVec. ConstLabels serve only special purposes. One is for the
+	// special case where the value of a label does not change during the
+	// lifetime of a process, e.g. if the revision of the running binary is
+	// put into a label. Another, more advanced purpose is if more than one
+	// Collector needs to collect Histograms with the same fully-qualified
+	// name. In that case, those Histograms must differ in the values of
+	// their ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+
+	// Buckets defines the buckets into which observations are counted. Each
+	// element in the slice is the upper inclusive bound of a bucket. The
+	// values must be sorted in strictly increasing order. There is no need
+	// to add a highest bucket with +Inf bound, it will be added
+	// implicitly. The default value is DefBuckets.
+	Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
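A short sketch of NewHistogram combined with the ExponentialBuckets helper defined above; the metric name is hypothetical:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        h := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name: "request_duration_seconds", // hypothetical metric name
            Help: "Request latency distribution in seconds.",
            // 12 buckets starting at 1ms and doubling each time:
            // 0.001, 0.002, ..., 2.048; the +Inf bucket is added implicitly.
            Buckets: prometheus.ExponentialBuckets(0.001, 2, 12),
        })
        prometheus.MustRegister(h)
        h.Observe(0.042) // record a single 42ms observation
    }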
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. 
This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+	*MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &HistogramVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newHistogram(desc, opts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Histogram), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Histogram), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+	return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if len(desc.variableLabels) != len(labelValues) {
+		return nil, errInconsistentCardinality
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 000000000..67ee5ac79
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,490 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+	buf := bufPool.Get()
+	if buf == nil {
+		return &bytes.Buffer{}
+	}
+	return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+	buf.Reset()
+	bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented).
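As the deprecation notes in this file suggest, new code should expose metrics through the promhttp sub-package rather than these helpers; a minimal sketch (the port and the /metrics path are conventional choices, not requirements):

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Serve all metrics registered with the default registry.
        http.Handle("/metrics", promhttp.Handler())
        log.Fatal(http.ListenAndServe(":9090", nil))
    }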
+func Handler() http.Handler {
+	return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		mfs, err := DefaultGatherer.Gather()
+		if err != nil {
+			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+			return
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		w.Write(buf.Bytes())
+	})
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+	header := request.Header.Get(acceptEncodingHeader)
+	parts := strings.Split(header, ",")
+	for _, part := range parts {
+		part := strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return gzip.NewWriter(writer), "gzip"
+		}
+	}
+	return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+	Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+	return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+	return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+	return nowFunc(func() time.Time {
+		defer func() {
+			t = t[1:]
+		}()
+
+		return t[0]
+	})
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+//   aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+//   seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+//   calculator requires access to the request header, it creates a race with
+//   any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// Upcoming versions of this package will provide ways of instrumenting HTTP +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." 
+ reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + 
case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..d4063d98f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. 
The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises.)
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Note that in most cases, labels have a value that varies during the
+	// lifetime of a process. Those labels are usually managed with a metric
+	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+	// serve only special purposes. One is for the special case where the
+	// value of a label does not change during the lifetime of a process,
+	// e.g. if the revision of the running binary is put into a
+	// label. Another, more advanced purpose is if more than one Collector
+	// needs to collect Metrics with the same fully-qualified name. In that
+	// case, those Metrics must differ in the values of their
+	// ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
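+//
+// A minimal sketch (labelPairs is a placeholder for the []*dto.LabelPair a
+// custom Write implementation is about to emit):
+//
+//	sort.Sort(LabelPairSorter(labelPairs))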
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..e31e62e78 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,142 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. 
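+//
+// A minimal sketch (the "myapp" namespace is illustrative), tracking the
+// current process:
+//
+//	MustRegister(NewProcessCollectorPIDFn(
+//		func() (int, error) { return os.Getpid(), nil },
+//		"myapp",
+//	))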
+func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + + if startTime, err := stat.StartTime(); err == nil { + c.startTime.Set(startTime) + ch <- c.startTime + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + c.openFDs.Set(float64(fds)) + ch <- c.openFDs + } + + if limits, err := p.NewLimits(); err == nil { + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 000000000..b6dd5a266 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,201 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. 
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package promhttp contains functions to create http.Handler instances to
+// expose Prometheus metrics via HTTP. In later versions of this package, it
+// will also contain tooling to instrument instances of http.Handler and
+// http.RoundTripper.
+//
+// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
+// you can create a handler for a custom registry or anything that implements
+// the Gatherer interface. It also allows creating handlers that act
+// differently on errors or that log errors.
+package promhttp
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+	buf := bufPool.Get()
+	if buf == nil {
+		return &bytes.Buffer{}
+	}
+	return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+	buf.Reset()
+	bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			case HTTPErrorOnError:
+				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
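+					// Encoding continues with the remaining
+					// metric families; if nothing could be
+					// encoded at all, lastErr is reported
+					// after the loop.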
+ case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + // TODO(beorn7): Consider streaming serving of metrics. + }) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 000000000..32a3986b0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,806 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "sort" + "sync" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. 
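+//
+// A minimal sketch (myCollector and t are placeholders for a Collector under
+// test and a *testing.T):
+//
+//	reg := NewPedanticRegistry()
+//	if err := reg.Register(myCollector); err != nil {
+//		t.Fatal(err)
+//	}
+//	if _, err := reg.Gather(); err != nil { // runs the extra consistency checks
+//		t.Error(err)
+//	}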
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// It is in general not safe to register the same Collector multiple
+	// times concurrently.
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument. (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered.
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g. a Collector collecting metrics with the same name but
+	// a different help string. The rationale here is that the same registry
+	// instance must only collect consistent metrics throughout its
+	// lifetime.
+	Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+	// Gather calls the Collect method of the registered Collectors and then
+	// gathers the collected metrics into a lexicographically sorted slice
+	// of MetricFamily protobufs. Even if an error occurs, Gather attempts
+	// to gather as many metrics as possible. Hence, if a non-nil error is
+	// returned, the returned MetricFamily slice could be nil (in case of a
+	// fatal error that prevented any meaningful metric collection) or
+	// contain a number of MetricFamily protobufs, some of which might be
+	// incomplete, and some might be missing altogether. The returned error
+	// (which might be a MultiError) explains the details. In scenarios
+	// where complete collection is critical, the returned MetricFamily
+	// protobufs should be disregarded if the returned error is non-nil.
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// RegisterOrGet registers the provided Collector with the DefaultRegisterer and +// returns the Collector, unless an equal Collector was registered before, in +// which case that Collector is returned. +// +// Deprecated: RegisterOrGet is merely a convenience function for the +// implementation as described in the documentation for +// AlreadyRegisteredError. As the use case is relatively rare, this function +// will be removed in a future version of this package to clean up the +// namespace. +func RegisterOrGet(c Collector) (Collector, error) { + if err := Register(c); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + return are.ExistingCollector, nil + } + return nil, err + } + return c, nil +} + +// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning +// an error. +// +// Deprecated: This is deprecated for the same reason RegisterOrGet is. See +// there for details. +func MustRegisterOrGet(c Collector) Collector { + c, err := RegisterOrGet(c) + if err != nil { + panic(err) + } + return c +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that +// gathers from the previous DefaultGatherers but then merges the MetricFamily +// protobufs returned from the provided hook function with the MetricFamily +// protobufs returned from the original DefaultGatherer. +// +// Deprecated: This function manipulates the DefaultGatherer variable. Consider +// the implications, i.e. don't do this concurrently with any uses of the +// DefaultGatherer. In the rare cases where you need to inject MetricFamily +// protobufs directly, it is recommended to use a custom Registry and combine it +// with a custom Gatherer using the Gatherers type (see +// there). SetMetricFamilyInjectionHook only exists for compatibility reasons +// with previous versions of this package. +func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { + DefaultGatherer = Gatherers{ + DefaultGatherer, + GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), + } +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. 
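+//
+// A sketch of that pattern (reqCount is a placeholder name):
+//
+//	if err := Register(reqCount); err != nil {
+//		if are, ok := err.(AlreadyRegisteredError); ok {
+//			reqCount = are.ExistingCollector.(Counter) // use the old one
+//		} else {
+//			panic(err)
+//		}
+//	}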
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+		} else {
+			// ...then check the new descriptors already seen.
+			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+				if dimHash != desc.dimHash {
+					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+				}
+			} else {
+				newDimHashesByName[desc.fqName] = desc.dimHash
+			}
+		}
+	}
+	// Did anything happen at all?
+	if len(newDescIDs) == 0 {
+		return errors.New("collector has no descriptors")
+	}
+	if existing, exists := r.collectorsByID[collectorID]; exists {
+		return AlreadyRegisteredError{
+			ExistingCollector: existing,
+			NewCollector:      c,
+		}
+	}
+	// If the collectorID is new, but at least one of the descs existed
+	// before, we are in trouble.
+	if duplicateDescErr != nil {
+		return duplicateDescErr
+	}
+
+	// Only after all tests have passed, actually register.
+	r.collectorsByID[collectorID] = c
+	for hash := range newDescIDs {
+		r.descIDs[hash] = struct{}{}
+	}
+	for name, dimHash := range newDimHashesByName {
+		r.dimHashesByName[name] = dimHash
+	}
+	return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+	var (
+		descChan    = make(chan *Desc, capDescChan)
+		descIDs     = map[uint64]struct{}{}
+		collectorID uint64 // Just a sum of the desc IDs.
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	for desc := range descChan {
+		if _, exists := descIDs[desc.id]; !exists {
+			collectorID += desc.id
+			descIDs[desc.id] = struct{}{}
+		}
+	}
+
+	r.mtx.RLock()
+	if _, exists := r.collectorsByID[collectorID]; !exists {
+		r.mtx.RUnlock()
+		return false
+	}
+	r.mtx.RUnlock()
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	delete(r.collectorsByID, collectorID)
+	for id := range descIDs {
+		delete(r.descIDs, id)
+	}
+	// dimHashesByName is left untouched as those must be consistent
+	// throughout the lifetime of a program.
+	return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricChan        = make(chan Metric, capMetricChan)
+		metricHashes      = map[uint64]struct{}{}
+		dimHashes         = map[string]uint64{}
+		wg                sync.WaitGroup
+		errs              MultiError          // The collected errors to return in the end.
+		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	r.mtx.RLock()
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+	// Scatter.
+	// (Collectors could be complex and slow, so we call them all at once.)
+	wg.Add(len(r.collectorsByID))
+	go func() {
+		wg.Wait()
+		close(metricChan)
+	}()
+	for _, collector := range r.collectorsByID {
+		go func(collector Collector) {
+			defer wg.Done()
+			collector.Collect(metricChan)
+		}(collector)
+	}
+
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+
+	r.mtx.RUnlock()
+
+	// Drain metricChan in case of premature return.
+	defer func() {
+		for range metricChan {
+		}
+	}()
+
+	// Gather.
+	for metric := range metricChan {
+		// This could be done concurrently, too, but it would require
+		// locking of metricFamiliesByName (and of metricHashes if
+		// checks are enabled). Most likely not worth it.
+		desc := metric.Desc()
+		dtoMetric := &dto.Metric{}
+		if err := metric.Write(dtoMetric); err != nil {
+			errs = append(errs, fmt.Errorf(
+				"error collecting metric %v: %s", desc, err,
+			))
+			continue
+		}
+		metricFamily, ok := metricFamiliesByName[desc.fqName]
+		if ok {
+			if metricFamily.GetHelp() != desc.help {
+				errs = append(errs, fmt.Errorf(
+					"collected metric %s %s has help %q but should have %q",
+					desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+				))
+				continue
+			}
+			// TODO(beorn7): Simplify switch once Desc has type.
+			switch metricFamily.GetType() {
+			case dto.MetricType_COUNTER:
+				if dtoMetric.Counter == nil {
+					errs = append(errs, fmt.Errorf(
+						"collected metric %s %s should be a Counter",
+						desc.fqName, dtoMetric,
+					))
+					continue
+				}
+			case dto.MetricType_GAUGE:
+				if dtoMetric.Gauge == nil {
+					errs = append(errs, fmt.Errorf(
+						"collected metric %s %s should be a Gauge",
+						desc.fqName, dtoMetric,
+					))
+					continue
+				}
+			case dto.MetricType_SUMMARY:
+				if dtoMetric.Summary == nil {
+					errs = append(errs, fmt.Errorf(
+						"collected metric %s %s should be a Summary",
+						desc.fqName, dtoMetric,
+					))
+					continue
+				}
+			case dto.MetricType_UNTYPED:
+				if dtoMetric.Untyped == nil {
+					errs = append(errs, fmt.Errorf(
+						"collected metric %s %s should be Untyped",
+						desc.fqName, dtoMetric,
+					))
+					continue
+				}
+			case dto.MetricType_HISTOGRAM:
+				if dtoMetric.Histogram == nil {
+					errs = append(errs, fmt.Errorf(
+						"collected metric %s %s should be a Histogram",
+						desc.fqName, dtoMetric,
+					))
+					continue
+				}
+			default:
+				panic("encountered MetricFamily with invalid type")
+			}
+		} else {
+			metricFamily = &dto.MetricFamily{}
+			metricFamily.Name = proto.String(desc.fqName)
+			metricFamily.Help = proto.String(desc.help)
+			// TODO(beorn7): Simplify switch once Desc has type.
+			switch {
+			case dtoMetric.Gauge != nil:
+				metricFamily.Type = dto.MetricType_GAUGE.Enum()
+			case dtoMetric.Counter != nil:
+				metricFamily.Type = dto.MetricType_COUNTER.Enum()
+			case dtoMetric.Summary != nil:
+				metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+			case dtoMetric.Untyped != nil:
+				metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+			case dtoMetric.Histogram != nil:
+				metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+			default:
+				errs = append(errs, fmt.Errorf(
+					"empty metric collected: %s", dtoMetric,
+				))
+				continue
+			}
+			metricFamiliesByName[desc.fqName] = metricFamily
+		}
+		if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		if r.pedanticChecksEnabled {
+			// Is the desc registered at all?
+			if _, exist := registeredDescIDs[desc.id]; !exist {
+				errs = append(errs, fmt.Errorf(
+					"collected metric %s %s with unregistered descriptor %s",
+					metricFamily.GetName(), dtoMetric, desc,
+				))
+				continue
+			}
+			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+				errs = append(errs, err)
+				continue
+			}
+		}
+		metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	}
+	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
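+//
+// A sketch of merging two registries behind one scrape endpoint (reg1, reg2,
+// and the handler wiring are illustrative):
+//
+//	g := Gatherers{reg1, reg2}
+//	http.Handle("/metrics", promhttp.HandlerFor(g, promhttp.HandlerOpts{}))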
+// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. 
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(metricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+	dimHashes map[string]uint64,
+) error {
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %s %s is not a %s",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
+	h := hashNew()
+	h = hashAdd(h, metricFamily.GetName())
+	h = hashAddByte(h, separatorByte)
+	dh := hashNew()
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+	sort.Sort(LabelPairSorter(dtoMetric.Label))
+	for _, lp := range dtoMetric.Label {
+		h = hashAdd(h, lp.GetValue())
+		h = hashAddByte(h, separatorByte)
+		dh = hashAdd(dh, lp.GetName())
+		dh = hashAddByte(dh, separatorByte)
+	}
+	if _, exists := metricHashes[h]; exists {
+		return fmt.Errorf(
+			"collected metric %s %s was collected before with the same name and label values",
+			metricFamily.GetName(), dtoMetric,
+		)
+	}
+	if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+		if dimHash != dh {
+			return fmt.Errorf(
+				"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+				metricFamily.GetName(), dtoMetric,
+			)
+		}
+	} else {
+		dimHashes[metricFamily.GetName()] = dh
+	}
+	metricHashes[h] = struct{}{}
+	return nil
+}
+
+func checkDescConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	desc *Desc,
+) error {
+	// Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..bce05bf9a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,534 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. 
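+//
+// To request different quantiles, set SummaryOpts.Objectives explicitly; a
+// sketch (the target values here are only an example):
+//
+//	opts.Objectives = map[float64]float64{0.5: 0.05, 0.95: 0.005, 0.99: 0.001}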
+var (
+	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+	errQuantileLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in summaries", quantileLabel,
+	)
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this
+	// Summary. Summaries with the same fully-qualified name must have the
+	// same label names in their ConstLabels.
+	//
+	// Note that in most cases, labels have a value that varies during the
+	// lifetime of a process. Those labels are usually managed with a
+	// SummaryVec. ConstLabels serve only special purposes. One is for the
+	// special case where the value of a label does not change during the
+	// lifetime of a process, e.g. if the revision of the running binary is
+	// put into a label. Another, more advanced purpose is if more than one
+	// Collector needs to collect Summaries with the same fully-qualified
+	// name. In that case, those Summaries must differ in the values of
+	// their ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported
+	// for q will be the φ-quantile value for some φ between q-e and q+e.
+	// The default value is DefObjectives.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(errInconsistentCardinality)
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if len(opts.Objectives) == 0 {
+		opts.Objectives = DefObjectives
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	s := &summary{
+		desc: desc,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: makeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := time.Now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(time.Now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
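+//
+// A short sketch (metric name and labels are illustrative):
+//
+//	reqDur := NewSummaryVec(
+//		SummaryOpts{Name: "request_duration_microseconds", Help: "Request duration."},
+//		[]string{"method", "code"},
+//	)
+//	MustRegister(reqDur)
+//	reqDur.WithLabelValues("get", "200").Observe(123.4)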
+type SummaryVec struct { + *MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. +func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. 
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if len(desc.variableLabels) != len(labelValues) {
+		return nil, errInconsistentCardinality
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 000000000..5faf7e6e3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+	Metric
+	Collector
+
+	// Set sets the Untyped metric to an arbitrary value.
+	Set(float64)
+	// Inc increments the Untyped metric by 1.
+	Inc()
+	// Dec decrements the Untyped metric by 1.
+	Dec()
+	// Add adds the given value to the Untyped metric. (The value can be
+	// negative, resulting in a decrease.)
+	Add(float64)
+	// Sub subtracts the given value from the Untyped metric. (The value can
+	// be negative, resulting in an increase.)
+	Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+	return newValue(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
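+//
+// A hypothetical usage sketch; the metric and label names here are assumed
+// purely for illustration:
+//
+//     depth := NewUntypedVec(
+//         UntypedOpts{
+//             Name: "queue_depth",
+//             Help: "Current depth of each work queue.",
+//         },
+//         []string{"queue"},
+//     )
+//     depth.WithLabelValues("email").Set(7)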
+type UntypedVec struct {
+	*MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &UntypedVec{
+		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+			return newValue(desc, UntypedValue, 0, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+//     myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+	return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 000000000..a944c3775
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	valType    ValueType
+	labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+	if len(labelValues) != len(desc.variableLabels) {
+		panic(errInconsistentCardinality)
+	}
+	result := &value{
+		desc:       desc,
+		valType:    valueType,
+		valBits:    math.Float64bits(val),
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}
+	result.init(result)
+	return result
+}
+
+func (v *value) Desc() *Desc {
+	return v.desc
+}
+
+func (v *value) Set(val float64) {
+	atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+	v.Add(1)
+}
+
+func (v *value) Dec() {
+	v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+	for {
+		oldBits := atomic.LoadUint64(&v.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (v *value) Sub(val float64) {
+	v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+	return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+	selfCollector
+
+	desc       *Desc
+	valType    ValueType
+	function   func() float64
+	labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently.
If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. 
+ return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..7f3eef9a4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,404 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. + children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + children: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.children { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. 
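+//
+// An error-checked sketch; the vec variable and label values are assumed
+// here, and for a SummaryVec-backed vector the returned Metric can be
+// asserted to Summary:
+//
+//     metric, err := vec.GetMetricWithLabelValues("404", "GET")
+//     if err != nil {
+//         // E.g. the number of label values did not match the Desc.
+//         return err
+//     }
+//     metric.(Summary).Observe(0.42)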
+// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabelValues(h, lvs), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabels(h, labels), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). 
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+	return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+
+	i := m.findMetricWithLabelValues(metrics, lvs)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+	metrics, ok := m.children[h]
+	if !ok {
+		return false
+	}
+	i := m.findMetricWithLabels(metrics, labels)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		m.children[h] = append(metrics[:i], metrics[i+1:]...)
+	} else {
+		delete(m.children, h)
+	}
+	return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for h := range m.children {
+		delete(m.children, h)
+	}
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+	if len(vals) != len(m.desc.variableLabels) {
+		return 0, errInconsistentCardinality
+	}
+	h := hashNew()
+	for _, val := range vals {
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+	if len(labels) != len(m.desc.variableLabels) {
+		return 0, errInconsistentCardinality
+	}
+	h := hashNew()
+	for _, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if !ok {
+			return 0, fmt.Errorf("label name %q missing in label map", label)
+		}
+		h = m.hashAdd(h, val)
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
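+//
+// It first attempts the lookup under the read lock and, on a miss, re-checks
+// under the write lock before creating, so concurrent callers end up sharing
+// a single metric per label-value combination.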
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabelValues(hash, lvs)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabelValues(hash, lvs)
+	if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+		copiedLVs := make([]string, len(lvs))
+		copy(copiedLVs, lvs)
+		metric = m.newMetric(copiedLVs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
+// creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithLabels(hash, labels)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithLabels(hash, labels)
+	if !ok {
+		lvs := m.extractLabelValues(labels)
+		metric = m.newMetric(lvs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+	for i, metric := range metrics {
+		if m.matchLabelValues(metric.values, lvs) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
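+//
+// The scan over the bucket is linear, which is acceptable because a bucket
+// only contains metrics whose label hashes collided, an expectedly rare case.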
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { + for i, metric := range metrics { + if m.matchLabels(metric.values, labels) { + return i + } + } + return len(metrics) +} + +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { + return false + } + for i, v := range values { + if v != lvs[i] { + return false + } + } + return true +} + +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { + return false + } + for i, k := range m.desc.variableLabels { + if values[i] != labels[k] { + return false + } + } + return true +} + +func (m *MetricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { + labelValues[i] = labels[k] + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..9805432c2 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,629 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: metrics.proto + +package io_prometheus_client // import "github.com/prometheus/client_model/go" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (dst *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return 
xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric 
proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var 
fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 
0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 000000000..11069edd7 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 000000000..636a2c1a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 000000000..c092723e8
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
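+// For example, "text/plain; version=0.0.4" maps to FmtText, while
+// "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily;
+// encoding=delimited" maps to FmtProtoDelim, as the switch below shows.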
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+	switch format {
+	case FmtProtoDelim:
+		return &protoDecoder{r: r}
+	}
+	return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	_, err := pbutil.ReadDelimited(d.r, v)
+	if err != nil {
+		return err
+	}
+	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !model.LabelName(l.GetName()).IsValid() {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	p    TextParser
+	fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
+	if len(d.fams) == 0 {
+		// No cached metric families, read everything and parse metrics.
+		fams, err := d.p.TextToMetricFamilies(d.r)
+		if err != nil {
+			return err
+		}
+		if len(fams) == 0 {
+			return io.EOF
+		}
+		d.fams = make([]*dto.MetricFamily, 0, len(fams))
+		for _, f := range fams {
+			d.fams = append(d.fams, f)
+		}
+	}
+
+	*v = *d.fams[0]
+	d.fams = d.fams[1:]
+
+	return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, extraction
+// continues with the remaining metric families. The returned error is the
+// last error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
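+			// Each quantile becomes its own sample, carrying a "quantile"
+			// label with the quantile rank in addition to the metric's own labels.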
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
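+			// By convention the "+Inf" bucket's cumulative count equals the
+			// histogram's sample count, so a matching bucket sample is
+			// synthesized from the _count value when the input lacks one.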
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     count.Value,
+				Timestamp: timestamp,
+			})
+		}
+	}
+
+	return samples
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 000000000..11839ed65
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+	Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+	return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		// Check for protocol buffer
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim
+			case "text":
+				return FmtProtoText
+			case "compact-text":
+				return FmtProtoCompact
+			}
+		}
+		// Check for text format.
+		ver := ac.Params["version"]
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText
+		}
+	}
+	return FmtText
+}
+
+// NewEncoder returns a new encoder based on the given format.
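+// The format is typically obtained by calling Negotiate on the incoming
+// request's headers. A minimal usage sketch (w, req, and families are
+// placeholders for the caller's values):
+//
+//	enc := NewEncoder(w, Negotiate(req.Header))
+//	for _, mf := range families {
+//		if err := enc.Encode(mf); err != nil {
+//			// handle the encoding error
+//		}
+//	}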
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..c71bcb981 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//     go-fuzz-build github.com/prometheus/common/expfmt
+//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := TextParser{}
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 000000000..f11321cd0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,303 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+	var written int
+
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return written, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err := fmt.Fprintf(
+			out, "# HELP %s %s\n",
+			name, escapeString(*in.Help, false),
+		)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	metricType := in.GetType()
+	n, err := fmt.Fprintf(
+		out, "# TYPE %s %s\n",
+		name, strings.ToLower(metricType.String()),
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	// Finally the samples, one line for each.
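+	// For a counter family with help text, the comment lines above plus one
+	// sample line per metric yield output of roughly this shape
+	// (illustrative names and values):
+	//
+	//	# HELP http_requests_total The total number of HTTP requests.
+	//	# TYPE http_requests_total counter
+	//	http_requests_total{method="post"} 1027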
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
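+// The additional label pair is how summary quantiles and histogram buckets
+// are rendered: for instance, passing model.BucketLabel as the additional
+// label name with the value "0.5" appends le="0.5" to the label set of the
+// emitted sample line.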
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..ec3d86ba7 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. 
+ return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
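+ // Lines such as `rpc_duration_seconds{quantile="0.5"} 3102` and
+ // `rpc_duration_seconds{quantile="0.9"} 3272` must end up in the same
+ // dto.Metric, so summaries and histograms are deduplicated here via the
+ // signature of their non-special labels. (Sample lines are illustrative,
+ // not taken from a fixture.)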
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. 
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
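+// For example (illustrative): with recognizeEscapeSequence set, the input
+// `contains\na line feed` yields a token with an actual line-feed character,
+// while an unknown sequence such as `\a` causes a parse error.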
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
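+ // E.g. a sample named `rpc_duration_seconds_count` (name chosen for
+ // illustration) should be attached to an existing summary or histogram
+ // family `rpc_duration_seconds` rather than start a family of its own.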
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 000000000..648b38cb6 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
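+//
+// For example (illustrative):
+//
+//	Negotiate("text/html, application/json;q=0.9",
+//		[]string{"application/json", "text/plain"})
+//
+// returns "application/json": the preferred clause text/html matches no
+// alternative, and the next clause matches application/json exactly.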
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 000000000..35e739c7a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
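+// A consistent alert has a start time, an end time (if set) no earlier than
+// the start time, at least one label pair, and valid labels and annotations.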
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 000000000..fc4de4106
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
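+// Membership is tested with the usual comma-ok map idiom, e.g.
+// `_, ok := set[fp]`; the empty struct value adds no per-element storage.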
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 000000000..038fc1c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 000000000..41051a01a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
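+ // (For instance, a scraped metric's own `job` label typically becomes
+ // `exported_job` when the server attaches its `job` label; illustrative,
+ // the renaming itself happens outside this package.)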
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 000000000..6eda08a73 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. 
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..f7250909b --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 000000000..8762b13c6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
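+// (Because the per-label hashes are combined with XOR, the result is
+// independent of map iteration order, at the cost of the weaker collision
+// resistance noted above.)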
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 000000000..bb99889d2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher matches the value of a given label, either literally or, if IsRegex
+// is set, as a regular expression.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns an error unless all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns an error unless all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 000000000..74ed5a9f7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,264 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ // MinimumTick is the minimum supported time resolution. This has to be
+ // at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+ // second is the Time value equivalent to one second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
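+// (The result has millisecond resolution; finer wall-clock precision is
+// truncated by TimeFromUnixNano below.)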
+func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
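+// For example (illustrative): ParseDuration("30s") yields a Duration of
+// 30 * time.Second, and ParseDuration("2w") yields 14 * 24 * time.Hour.
+// Mixed units such as "1h30m" do not match durationRE and are rejected.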
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9d8fb1a2 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. 
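+ // Callers should therefore compare against ZeroSample (or ZeroSamplePair)
+ // to detect a missing sample, rather than against the natural zero value.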
+ ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// semantics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. 
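+// The expected encoding mirrors MarshalJSON above: the metric plus a
+// [<timestamp>, "<value>"] pair, for example (an illustrative document,
+// not from the upstream text):
+//
+//	{"metric":{"__name__":"up"},"value":[1414141414.123,"1"]}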
+func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. 
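+// The expected input mirrors MarshalJSON above: a [<timestamp>, "<value>"]
+// pair, for example (illustrative):
+//
+//	[1414141414.123,"42"]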
+func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
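+// A minimal usage sketch (the mount point shown is the common default):
+//
+//	fs, _ := procfs.NewFS("/proc")
+//	info, err := fs.NewBuddyInfo() // one BuddyInfo per node/zone line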
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..b6c6b2ce1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. +func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. +func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..2ff228e9d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "io/ioutil" + "strconv" + "strings" +) + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. +func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} + +// ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
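+// For example (an illustrative path, not part of the upstream text):
+//
+//	mtu, err := util.ReadUintFromFile("/sys/class/net/eth0/mtu")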
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
new file mode 100644
index 000000000..df0d567b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
+	// Go's ioutil.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	b := make([]byte, 128)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 000000000..e36d4a3bd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,259 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+ LocalPort uint16 + // The remote (real) port. + RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
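+// A minimal usage sketch (assumes IPVS is enabled in the kernel):
+//
+//	fs, _ := procfs.NewFS(procfs.DefaultMountPoint)
+//	backends, err := fs.NewIPVSBackendStatus()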
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..9dc19583d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+	buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+	// Name of the device.
+	Name string
+	// Activity state of the device.
+	ActivityState string
+	// Number of active disks.
+	DisksActive int64
+	// Total number of disks the device consists of.
+	DisksTotal int64
+	// Number of blocks the device holds.
+	BlocksTotal int64
+	// Number of blocks on the device that are in sync.
+	BlocksSynced int64
+}
+
+// ParseMDStat parses the mdstat file and returns a slice with one MDStat
+// entry per md device.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+	mdStatusFilePath := fs.Path("mdstat")
+	content, err := ioutil.ReadFile(mdStatusFilePath)
+	if err != nil {
+		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+	}
+
+	mdStates := []MDStat{}
+	lines := strings.Split(string(content), "\n")
+	for i, l := range lines {
+		if l == "" {
+			continue
+		}
+		if l[0] == ' ' {
+			continue
+		}
+		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+			continue
+		}
+
+		mainLine := strings.Split(l, " ")
+		if len(mainLine) < 3 {
+			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+		}
+		mdName := mainLine[0]
+		activityState := mainLine[2]
+
+		if len(lines) <= i+3 {
+			return mdStates, fmt.Errorf(
+				"error parsing %s: too few lines for md device %s",
+				mdStatusFilePath,
+				mdName,
+			)
+		}
+
+		active, total, size, err := evalStatusline(lines[i+1])
+		if err != nil {
+			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+		}
+
+		// j is the line number of the syncing-line.
+		j := i + 2
+		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+			j = i + 3
+		}
+
+		// If the device is syncing at the moment, get the number of currently
+		// synced bytes, otherwise that number equals the size of the device.
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 000000000..7a8a1e099 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10TCPLen = 10 + fieldTransport10UDPLen = 7 + + fieldTransport11TCPLen = 13 + fieldTransport11UDPLen = 10 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. 
+ Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. + Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. + VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. 
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueTime time.Duration
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseTime time.Duration
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTime time.Duration
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
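+// A device line has the shape checked by parseMount below, for example
+// (an illustrative entry, not taken from a real mountstats file):
+//
+//	device fs.example.com:/export mounted on /mnt/nfs with fstype nfs statvers=1.1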
+func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. 
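+// The scanner is expected to be positioned just past the device line, on
+// tagged lines such as "age:", "bytes:", "events:" and "xprt:" (see the
+// field constants below).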
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[1:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. 
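+// The input is the list of counters following the "events:" tag; exactly
+// fieldEventsLen (27) values are expected, in the field order of
+// NFSEventsStats above.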
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. +func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + // Extract the protocol field. 
It is the only string value in the line + protocol := ss[0] + ss = ss[1:] + + switch statVersion { + case statVersion10: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport10TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport10UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + var expectedLength int + if protocol == "tcp" { + expectedLength = fieldTransport11TCPLen + } else if protocol == "udp" { + expectedLength = fieldTransport11UDPLen + } else { + return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + } + if len(ss) != expectedLength { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. Since the stat length is bigger for TCP stats, we use + // the TCP length here. + // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11TCPLen) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + // The fields differ depending on the transport protocol (TCP or UDP) + // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt + // + // For the udp RPC transport there is no connection count, connect idle time, + // or idle time (fields #3, #4, and #5); all other fields are the same. So + // we set them to 0 here. + if protocol == "udp" { + ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } + + return &NFSTransportStats{ + Protocol: protocol, + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 000000000..3f2523371 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. 
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. + TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
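+//
+// An input line carries the interface name before a colon and 16 counters
+// after it, for example (values illustrative only):
+//
+//	eth0: 2776770 11307 0 0 0 0 0 0 1502504 11293 0 0 0 0 0 0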
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	parts := strings.SplitN(rawLine, ":", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	// The line must carry 16 counters; check up front so the fixed indexing
+	// below cannot panic on a truncated line.
+	if len(fields) < 16 {
+		return nil, errors.New("invalid net/dev line, missing fields")
+	}
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(parts[0])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted, comma-separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(nd))
+	for _, ifc := range nd {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 000000000..651bf6819
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfs implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. +type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. 
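+//
+// The corresponding "proc4" line in /proc/net/rpc/nfs begins with a count of
+// the values that follow; parseClientV4Stats maps those positional values
+// onto this struct. For example (illustrative values only), the line
+//
+//	proc4 2 10 20
+//
+// yields ClientV4Stats{Null: 10, Read: 20} with all other counters zero.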
+type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 + Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. +type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 000000000..95a83cc5b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, 
error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. + if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..c0d3a5ad9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+	stats := &ClientRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least a label and one value on each line
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFS metric line %q", line)
+		}
+
+		values, err := util.ParseUint64s(parts[1:])
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ClientRPC, err = parseClientRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ClientV4Stats, err = parseClientV4Stats(values)
+		default:
+			return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFS file: %s", err)
+	}
+
+	return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 000000000..57bb4a358
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+	stats := &ServerRPCStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least a label and one value on each line
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+		}
+		label := parts[0]
+
+		var values []uint64
+		var err error
+		if label == "th" {
+			if len(parts) < 3 {
+				return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+			}
+			values, err = util.ParseUint64s(parts[1:3])
+		} else {
+			values, err = util.ParseUint64s(parts[1:])
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+		}
+
+		switch metricLine := parts[0]; metricLine {
+		case "rc":
+			stats.ReplyCache, err = parseReplyCache(values)
+		case "fh":
+			stats.FileHandles, err = parseFileHandles(values)
+		case "io":
+			stats.InputOutput, err = parseInputOutput(values)
+		case "th":
+			stats.Threads, err = parseThreads(values)
+		case "ra":
+			stats.ReadAheadCache, err = parseReadAheadCache(values)
+		case "net":
+			stats.Network, err = parseNetwork(values)
+		case "rpc":
+			stats.ServerRPC, err = parseServerRPC(values)
+		case "proc2":
+			stats.V2Stats, err = parseV2Stats(values)
+		case "proc3":
+			stats.V3Stats, err = parseV3Stats(values)
+		case "proc4":
+			stats.ServerV4Stats, err = parseServerV4Stats(values)
+		case "proc4ops":
+			stats.V4Ops, err = parseV4Ops(values)
+		default:
+			return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+	}
+
+	return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 000000000..06bed0ef4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+	// The process ID.
+	PID int
+
+	fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int           { return len(p) }
+func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
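+//
+// Illustrative usage (a sketch; assumes /proc is mounted at the default
+// location and PID 1 is visible):
+//
+//	p, err := procfs.NewProc(1)
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(p.PID)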
+func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// Cwd returns the absolute path to the current working directory of the process. +func (p Proc) Cwd() (string, error) { + wd, err := os.Readlink(p.path("cwd")) + if os.IsNotExist(err) { + return "", nil + } + + return wd, err +} + +// RootDir returns the absolute path to the process's root directory (as set by chroot) +func (p Proc) RootDir() (string, error) { + rdir, err := os.Readlink(p.path("root")) + if os.IsNotExist(err) { + return "", nil + } + + return rdir, err +} + +// FileDescriptors returns the currently open file descriptors of a process. +func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. 
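+//
+// Illustrative usage (a sketch, given a Proc value p):
+//
+//	targets, err := p.FileDescriptorTargets()
+//	if err == nil {
+//		for _, t := range targets {
+//			fmt.Println(t) // e.g. "socket:[1234]" or a file path
+//		}
+//	}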
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	targets := make([]string, len(names))
+
+	for i, name := range names {
+		target, err := os.Readlink(p.path("fd", name))
+		if err == nil {
+			targets[i] = target
+		}
+	}
+
+	return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+	fds, err := p.fileDescriptors()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 000000000..0251c83bf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
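+//
+// Illustrative usage (a sketch, given a Proc value p):
+//
+//	pio, err := p.NewIO()
+//	if err == nil {
+//		fmt.Println(pio.ReadBytes, pio.WriteBytes)
+//	}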
+func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 000000000..f04ba6fda --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. +type ProcLimits struct { + // CPU time limit in seconds. + CPUTime int64 + // Maximum size of files that the process may create. + FileSize int64 + // Maximum size of the process's data segment (initialized data, + // uninitialized data, and heap). + DataSize int64 + // Maximum size of the process stack in bytes. + StackSize int64 + // Maximum size of a core file. + CoreFileSize int64 + // Limit of the process's resident set in pages. + ResidentSet int64 + // Maximum number of processes that can be created for the real user ID of + // the calling process. + Processes int64 + // Value one greater than the maximum file descriptor number that can be + // opened by this process. + OpenFiles int64 + // Maximum number of bytes of memory that may be locked into RAM. + LockedMemory int64 + // Maximum size of the process's virtual memory address space in bytes. + AddressSpace int64 + // Limit on the combined number of flock(2) locks and fcntl(2) leases that + // this process may establish. + FileLocks int64 + // Limit of signals that may be queued for the real user ID of the calling + // process. + PendingSignals int64 + // Limit on the number of bytes that can be allocated for POSIX message + // queues for the real user ID of the calling process. + MsqqueueSize int64 + // Limit of the nice priority set using setpriority(2) or nice(2). + NicePriority int64 + // Limit of the real-time priority set using sched_setscheduler(2) or + // sched_setparam(2). + RealtimePriority int64 + // Limit (in microseconds) on the amount of CPU time that a process + // scheduled under a real-time scheduling policy may consume without making + // a blocking system call. + RealtimeTimeout int64 +} + +const ( + limitsFields = 3 + limitsUnlimited = "unlimited" +) + +var ( + limitsDelimiter = regexp.MustCompile(" +") +) + +// NewLimits returns the current soft limits of the process. 
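+//
+// Illustrative usage (a sketch, given a Proc value p; parseInt below maps the
+// literal "unlimited" to -1):
+//
+//	l, err := p.NewLimits()
+//	if err == nil {
+//		fmt.Println(l.OpenFiles) // -1 means unlimited
+//	}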
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int64, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return i, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 000000000..d06c26eba
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
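+//
+// Illustrative usage (a sketch, given a Proc value p):
+//
+//	ns, err := p.NewNamespaces()
+//	if err == nil {
+//		fmt.Println(ns["net"].Inode)
+//	}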
+func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..3cf2a9f18 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,188 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" +) + +// Originally, this USER_HZ value was dynamically retrieved via a sysconf call +// which required cgo. However, that caused a lot of problems regarding +// cross-compilation. Alternatives such as running a binary to determine the +// value, or trying to derive it in some other way were all problematic. After +// much research it was determined that USER_HZ is actually hardcoded to 100 on +// all Go-supported platforms as of the time of this writing. This is why we +// decided to hardcode it here as well. It is not impossible that there could +// be systems with exceptions, but they should be very exotic edge cases, and +// in that case, the worst outcome will be two misreported metrics. +// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. 
+ MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := os.Open(p.path("stat")) + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. +func (s ProcStat) ResidentMemory() int { + return s.RSS * os.Getpagesize() +} + +// StartTime returns the unix timestamp of the process in seconds. +func (s ProcStat) StartTime() (float64, error) { + stat, err := s.fs.NewStat() + if err != nil { + return 0, err + } + return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil +} + +// CPUTime returns the total CPU user and system time in seconds. +func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 000000000..61eb6b0e3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// CPUStat shows how much time the cpu has spent in various stages.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU []CPUStat
+	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct
+// plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
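+//
+// The softirq line in /proc/stat carries a grand total followed by ten
+// per-type counters, for example (illustrative values only):
+//
+//	softirq 229 27 159 10 0 2 0 0 10 0 21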
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+	// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
+	f, err := os.Open(fs.Path("stat"))
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	stat := Stat{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least a key and a value; skip malformed lines
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				for int64(len(stat.CPU)) <= cpuID {
+					stat.CPU = append(stat.CPU, CPUStat{})
+				}
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return stat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 000000000..8f1508f0f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy error
+	XfrmOutPolError     int
+	XfrmFwdHdrError     int
+	XfrmOutStateInvalid int
+	XfrmAcquireError    int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
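+//
+// Illustrative usage (a sketch; each line of net/xfrm_stat holds a counter
+// name and an integer value, e.g. "XfrmInError 0"):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err == nil {
+//		x, _ := fs.NewXfrmStat()
+//		fmt.Println(x.XfrmInError)
+//	}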
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldn't parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case "XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 000000000..2bc0ef342 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. 
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. + us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
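
A minimal sketch of driving ParseStats directly, using a hand-written payload in the "label value..." format it expects; the sample numbers are made up and the snippet is illustrative only, not part of the vendored package.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// extent_alloc takes exactly four values, rw exactly two,
	// matching the validation in the builder functions below.
	input := "extent_alloc 1 2 3 4\nrw 10 20\n"
	s, err := xfs.ParseStats(strings.NewReader(input))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.ExtentAllocation.ExtentsAllocated, s.ReadWrite.Read) // 1 10
}
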
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. +func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. 
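
To illustrate the uint32/uint64 split called out above: only the extended-precision counters are 64-bit byte totals, while every other Stats field is a 32-bit event count. The helper name and sample values below are illustrative, not part of the vendored package.

package main

import (
	"fmt"

	"github.com/prometheus/procfs/xfs"
)

// totalBytes sums the uint64 extended-precision byte counters.
func totalBytes(s *xfs.Stats) uint64 {
	return s.ExtendedPrecision.ReadBytes + s.ExtendedPrecision.WriteBytes
}

func main() {
	s := &xfs.Stats{}
	s.ExtendedPrecision.ReadBytes = 1024
	s.ExtendedPrecision.WriteBytes = 2048
	fmt.Println(totalBytes(s)) // 3072
}
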
+type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. +type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. +type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +} diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go new file mode 100644 index 000000000..50665dcb1 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go @@ -0,0 +1,308 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus contains a Prometheus exporter that supports exporting +// OpenCensus views as Prometheus metrics. +package prometheus // import "go.opencensus.io/exporter/prometheus" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "sort" + "sync" + + "go.opencensus.io/internal" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// Exporter exports stats to Prometheus, users need +// to register the exporter as an http.Handler to be +// able to export. +type Exporter struct { + opts Options + g prometheus.Gatherer + c *collector + handler http.Handler +} + +// Options contains options for configuring the exporter. +type Options struct { + Namespace string + Registry *prometheus.Registry + OnError func(err error) +} + +// NewExporter returns an exporter that exports stats to Prometheus. +func NewExporter(o Options) (*Exporter, error) { + if o.Registry == nil { + o.Registry = prometheus.NewRegistry() + } + collector := newCollector(o, o.Registry) + e := &Exporter{ + opts: o, + g: o.Registry, + c: collector, + handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), + } + return e, nil +} + +var _ http.Handler = (*Exporter)(nil) +var _ view.Exporter = (*Exporter)(nil) + +func (c *collector) registerViews(views ...*view.View) { + count := 0 + for _, view := range views { + sig := viewSignature(c.opts.Namespace, view) + c.registeredViewsMu.Lock() + _, ok := c.registeredViews[sig] + c.registeredViewsMu.Unlock() + + if !ok { + desc := prometheus.NewDesc( + viewName(c.opts.Namespace, view), + view.Description, + tagKeysToLabels(view.TagKeys), + nil, + ) + c.registeredViewsMu.Lock() + c.registeredViews[sig] = desc + c.registeredViewsMu.Unlock() + count++ + } + } + if count == 0 { + return + } + + c.ensureRegisteredOnce() +} + +// ensureRegisteredOnce invokes reg.Register on the collector itself +// exactly once to ensure that we don't get errors such as +// cannot register the collector: descriptor Desc{fqName: *} +// already exists with the same fully-qualified name and const label values +// which is documented by Prometheus at +// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101 +func (c *collector) ensureRegisteredOnce() { + c.registerOnce.Do(func() { + if err := c.reg.Register(c); err != nil { + c.opts.onError(fmt.Errorf("cannot register the collector: %v", err)) + } + }) + +} + +func (o *Options) onError(err error) { + if o.OnError != nil { + o.OnError(err) + } else { + log.Printf("Failed to export to Prometheus: %v", err) + } +} + +// ExportView exports to the Prometheus if view data has one or more rows. +// Each OpenCensus AggregationData will be converted to +// corresponding Prometheus Metric: SumData will be converted +// to Untyped Metric, CountData will be a Counter Metric, +// DistributionData will be a Histogram Metric. +func (e *Exporter) ExportView(vd *view.Data) { + if len(vd.Rows) == 0 { + return + } + e.c.addViewData(vd) +} + +// ServeHTTP serves the Prometheus endpoint. +func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// collector implements prometheus.Collector +type collector struct { + opts Options + mu sync.Mutex // mu guards all the fields. 
+
+	registerOnce sync.Once
+
+	// reg helps collector register views dynamically.
+	reg *prometheus.Registry
+
+	// viewData are accumulated and atomically
+	// appended to on every Export invocation, from
+	// stats. These views are cleared out when
+	// Collect is invoked and the cycle is repeated.
+	viewData map[string]*view.Data
+
+	registeredViewsMu sync.Mutex
+	// registeredViews maps a view to a prometheus desc.
+	registeredViews map[string]*prometheus.Desc
+}
+
+func (c *collector) addViewData(vd *view.Data) {
+	c.registerViews(vd.View)
+	sig := viewSignature(c.opts.Namespace, vd.View)
+
+	c.mu.Lock()
+	c.viewData[sig] = vd
+	c.mu.Unlock()
+}
+
+func (c *collector) Describe(ch chan<- *prometheus.Desc) {
+	c.registeredViewsMu.Lock()
+	registered := make(map[string]*prometheus.Desc)
+	for k, desc := range c.registeredViews {
+		registered[k] = desc
+	}
+	c.registeredViewsMu.Unlock()
+
+	for _, desc := range registered {
+		ch <- desc
+	}
+}
+
+// Collect fetches the statistics from OpenCensus
+// and delivers them as Prometheus Metrics.
+// Collect is invoked every time a prometheus.Gatherer is run,
+// for example when the HTTP endpoint is invoked by Prometheus.
+func (c *collector) Collect(ch chan<- prometheus.Metric) {
+	// We need a copy of all the view data up until this point.
+	viewData := c.cloneViewData()
+
+	for _, vd := range viewData {
+		sig := viewSignature(c.opts.Namespace, vd.View)
+		c.registeredViewsMu.Lock()
+		desc := c.registeredViews[sig]
+		c.registeredViewsMu.Unlock()
+
+		for _, row := range vd.Rows {
+			metric, err := c.toMetric(desc, vd.View, row)
+			if err != nil {
+				c.opts.onError(err)
+			} else {
+				ch <- metric
+			}
+		}
+	}
+
+}
+
+func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) {
+	switch data := row.Data.(type) {
+	case *view.CountData:
+		return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...)
+
+	case *view.DistributionData:
+		points := make(map[float64]uint64)
+		// Histograms are cumulative in Prometheus.
+		// 1. Sort buckets in ascending order but retain
+		// their indices for reverse lookup later on.
+		// TODO: If there is a guarantee that distribution elements
+		// are always sorted, then skip the sorting.
+		indicesMap := make(map[float64]int)
+		buckets := make([]float64, 0, len(v.Aggregation.Buckets))
+		for i, b := range v.Aggregation.Buckets {
+			if _, ok := indicesMap[b]; !ok {
+				indicesMap[b] = i
+				buckets = append(buckets, b)
+			}
+		}
+		sort.Float64s(buckets)
+
+		// 2. Now that the buckets are sorted by magnitude, we can create
+		// cumulative counts and map each bucket back to its original
+		// per-bucket count via indicesMap.
+		cumCount := uint64(0)
+		for _, b := range buckets {
+			i := indicesMap[b]
+			cumCount += uint64(data.CountPerBucket[i])
+			points[b] = cumCount
+		}
+		return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
+
+	case *view.SumData:
+		return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
+
+	case *view.LastValueData:
+		return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags)...)
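+
+	// To summarize the conversion implemented by the cases above:
+	// CountData -> Counter, DistributionData -> cumulative Histogram,
+	// SumData -> Untyped, LastValueData -> Gauge. Any other aggregation
+	// falls through to the error below.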
+ + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) + } +} + +func tagKeysToLabels(keys []tag.Key) (labels []string) { + for _, key := range keys { + labels = append(labels, internal.Sanitize(key.Name())) + } + return labels +} + +func tagsToLabels(tags []tag.Tag) []string { + var names []string + for _, tag := range tags { + names = append(names, internal.Sanitize(tag.Key.Name())) + } + return names +} + +func newCollector(opts Options, registrar *prometheus.Registry) *collector { + return &collector{ + reg: registrar, + opts: opts, + registeredViews: make(map[string]*prometheus.Desc), + viewData: make(map[string]*view.Data), + } +} + +func tagValues(t []tag.Tag) []string { + var values []string + for _, t := range t { + values = append(values, t.Value) + } + return values +} + +func viewName(namespace string, v *view.View) string { + var name string + if namespace != "" { + name = namespace + "_" + } + return name + internal.Sanitize(v.Name) +} + +func viewSignature(namespace string, v *view.View) string { + var buf bytes.Buffer + buf.WriteString(viewName(namespace, v)) + for _, k := range v.TagKeys { + buf.WriteString("-" + k.Name()) + } + return buf.String() +} + +func (c *collector) cloneViewData() map[string]*view.Data { + c.mu.Lock() + defer c.mu.Unlock() + + viewDataCopy := make(map[string]*view.Data) + for sig, viewData := range c.viewData { + viewDataCopy[sig] = viewData + } + return viewDataCopy +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go new file mode 100644 index 000000000..a6c466ae8 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -0,0 +1,56 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + + "google.golang.org/grpc/stats" +) + +// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and +// traces. Use with gRPC clients only. +type ClientHandler struct { + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions +} + +// HandleConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. 
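
For orientation, this is how ClientHandler is typically installed on a client connection so that TagRPC and HandleRPC above see every RPC; a minimal sketch, with the target address and insecure transport chosen purely for illustration.

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"google.golang.org/grpc"
)

func main() {
	// grpc.WithStatsHandler routes RPC lifecycle events through the
	// ClientHandler methods defined in this file.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
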
+func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go new file mode 100644 index 000000000..7d0352062 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -0,0 +1,116 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ClientHandler: +var ( + ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) +) + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. 
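
Since none of the views declared below are registered by default, a caller opts in explicitly; a minimal sketch:

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the default client views listed below; data collection for
	// the corresponding measures starts only after this call.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
}
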
+var ( + ClientSentBytesPerRPCView = &view.View{ + Measure: ClientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of bytes sent per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientReceivedBytesPerRPCView = &view.View{ + Measure: ClientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of bytes received per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultBytesDistribution, + } + + ClientRoundtripLatencyView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } + + ClientCompletedRPCsView = &view.View{ + Measure: ClientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + Aggregation: view.Count(), + } + + ClientSentMessagesPerRPCView = &view.View{ + Measure: ClientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientReceivedMessagesPerRPCView = &view.View{ + Measure: ClientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages count per RPC, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMessageCountDistribution, + } + + ClientServerLatencyView = &view.View{ + Measure: ClientServerLatency, + Name: "grpc.io/client/server_latency", + Description: "Distribution of server latency as viewed by client, by method.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: DefaultMillisecondsDistribution, + } + + // Deprecated: This view is going to be removed, if you need it please define it + // yourself. + ClientRequestCountView = &view.View{ + Name: "Count of request messages per client RPC", + TagKeys: []tag.Key{KeyClientMethod}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, +} + +// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go new file mode 100644 index 000000000..303c607f6 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -0,0 +1,49 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package ocgrpc
+
+import (
+	"time"
+
+	"go.opencensus.io/tag"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/stats"
+)
+
+// statsTagRPC gets the tag.Map populated by the application code and
+// serializes its tags into the gRPC metadata so that they are sent to
+// the server.
+func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+	startTime := time.Now()
+	if info == nil {
+		if grpclog.V(2) {
+			// info is nil here, so it must not be dereferenced.
+			grpclog.Infoln("clientHandler.TagRPC called with nil info.")
+		}
+		return ctx
+	}
+
+	d := &rpcData{
+		startTime: startTime,
+		method:    info.FullMethodName,
+	}
+	ts := tag.FromContext(ctx)
+	if ts != nil {
+		encoded := tag.Encode(ts)
+		ctx = stats.SetTags(ctx, encoded)
+	}
+
+	return context.WithValue(ctx, rpcDataKey, d)
+}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
new file mode 100644
index 000000000..1370323fb
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ocgrpc contains OpenCensus stats and trace
+// integrations for gRPC.
+//
+// Use ServerHandler for servers and ClientHandler for clients.
+package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
new file mode 100644
index 000000000..b67b3e2be
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
@@ -0,0 +1,80 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocgrpc
+
+import (
+	"go.opencensus.io/trace"
+	"golang.org/x/net/context"
+
+	"google.golang.org/grpc/stats"
+)
+
+// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
+// traces. Use with gRPC servers.
+//
+// When installed (see Example), tracing metadata is read from inbound RPCs
+// by default.
If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). +type ServerHandler struct { + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool + + // StartOptions to use for to spans started around RPCs handled by this server. + // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions +} + +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { + // no-op +} + +// TagConn exists to satisfy gRPC stats.Handler. +func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { + // no-op + return ctx +} + +// HandleRPC implements per-RPC tracing and stats instrumentation. +func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + traceHandleRPC(ctx, rs) + statsHandleRPC(ctx, rs) +} + +// TagRPC implements per-RPC context management. +func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) + return ctx +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go new file mode 100644 index 000000000..609d9ed24 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ocgrpc + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following variables are measures are recorded by ServerHandler: +var ( + ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +// TODO(acetechnologist): This is temporary and will need to be replaced by a +// mechanism to load these defaults from a common repository/config shared by +// all supported languages. Likely a serialized protobuf of these defaults. + +// Predefined views may be registered to collect data for the above measures. +// As always, you may also define your own custom views over measures collected by this +// package. These are declared as a convenience only; none are registered by +// default. +var ( + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: ServerReceivedBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of total sent bytes per RPC, by method.", + Measure: ServerSentBytesPerRPC, + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: DefaultBytesDistribution, + } + + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerLatency, + Aggregation: DefaultMillisecondsDistribution, + } + + ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Count of RPCs by method and status.", + TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, + Measure: ServerLatency, + Aggregation: view.Count(), + } + + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of messages received count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerReceivedMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } + + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of messages sent count per RPC, by method.", + TagKeys: []tag.Key{KeyServerMethod}, + Measure: ServerSentMessagesPerRPC, + Aggregation: DefaultMessageCountDistribution, + } +) + +// DefaultServerViews are the default server views provided by this package. 
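
On the server side the registration is symmetric; a minimal sketch combining registration of the default views listed below with the stats handler (the listen address is illustrative):

package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Opt in to the default server views declared below.
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	// grpc.StatsHandler installs ServerHandler for stats and traces.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	log.Fatal(srv.Serve(lis))
}
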
+var DefaultServerViews = []*view.View{
+	ServerReceivedBytesPerRPCView,
+	ServerSentBytesPerRPCView,
+	ServerLatencyView,
+	ServerCompletedRPCsView,
+}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
new file mode 100644
index 000000000..7847c1a91
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
@@ -0,0 +1,63 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package ocgrpc
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+
+	"go.opencensus.io/tag"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/stats"
+)
+
+// statsTagRPC gets the metadata from the gRPC context, extracts the encoded
+// tags from it, puts them into a new tag.Map, and returns a context carrying
+// that map.
+func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+	startTime := time.Now()
+	if info == nil {
+		if grpclog.V(2) {
+			grpclog.Infof("opencensus: TagRPC called with nil info.")
+		}
+		return ctx
+	}
+	d := &rpcData{
+		startTime: startTime,
+		method:    info.FullMethodName,
+	}
+	propagated := h.extractPropagatedTags(ctx)
+	ctx = tag.NewContext(ctx, propagated)
+	ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
+	return context.WithValue(ctx, rpcDataKey, d)
+}
+
+// extractPropagatedTags creates a new tag map containing the tags extracted from the
+// gRPC metadata.
+func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
+	buf := stats.Tags(ctx)
+	if buf == nil {
+		return nil
+	}
+	propagated, err := tag.Decode(buf)
+	if err != nil {
+		if grpclog.V(2) {
+			grpclog.Warningf("opencensus: failed to decode tags from gRPC metadata: %v", err)
+		}
+		return nil
+	}
+	return propagated
+}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
new file mode 100644
index 000000000..119bbda9b
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
@@ -0,0 +1,205 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package ocgrpc
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	ocstats "go.opencensus.io/stats"
+	"go.opencensus.io/stats/view"
+	"go.opencensus.io/tag"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+type grpcInstrumentationKey string
+
+// rpcData holds the instrumentation RPC data that is needed between the start
+// and end of a call. It holds the info that this package needs to keep track
+// of between the various GRPC events.
+type rpcData struct {
+	// sentCount, sentBytes, recvCount and recvBytes have to be the first
+	// words in order to be 64-bit aligned on 32-bit architectures.
+	sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
+
+	// startTime represents the time at which TagRPC was invoked at the
+	// beginning of an RPC. It is an approximation of the time when the
+	// application code invoked GRPC code.
+	startTime time.Time
+	method    string
+}
+
+// The following variables define the default hard-coded auxiliary data used by
+// both the default GRPC client and GRPC server metrics.
+var (
+	DefaultBytesDistribution        = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+	DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+	DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
+)
+
+// Server tags are applied to the context used to process each RPC, as well as
+// the measures at the end of each RPC.
+var (
+	KeyServerMethod, _ = tag.NewKey("grpc_server_method")
+	KeyServerStatus, _ = tag.NewKey("grpc_server_status")
+)
+
+// Client tags are applied to measures at the end of each RPC.
+var (
+	KeyClientMethod, _ = tag.NewKey("grpc_client_method")
+	KeyClientStatus, _ = tag.NewKey("grpc_client_status")
+)
+
+var (
+	rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
+)
+
+func methodName(fullname string) string {
+	return strings.TrimLeft(fullname, "/")
+}
+
+// statsHandleRPC processes the RPC events.
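
The rpcDataKey/grpcInstrumentationKey pairing above is the standard unexported-context-key pattern; reduced to a standalone sketch below, with names that are illustrative rather than taken from this package.

package main

import (
	"context"
	"fmt"
)

// A private key type prevents collisions with context values set by other
// packages, exactly as grpcInstrumentationKey does above.
type exampleKey string

const dataKey = exampleKey("example-rpcData")

type data struct{ method string }

func main() {
	ctx := context.WithValue(context.Background(), dataKey, &data{method: "pkg.Service/Call"})
	if d, ok := ctx.Value(dataKey).(*data); ok {
		fmt.Println(d.method) // pkg.Service/Call
	}
}
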
+func statsHandleRPC(ctx context.Context, s stats.RPCStats) { + switch st := s.(type) { + case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + // do nothing for client + case *stats.OutPayload: + handleRPCOutPayload(ctx, st) + case *stats.InPayload: + handleRPCInPayload(ctx, st) + case *stats.End: + handleRPCEnd(ctx, st) + default: + grpclog.Infof("unexpected stats: %T", st) + } +} + +func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.sentBytes, int64(s.Length)) + atomic.AddInt64(&d.sentCount, 1) +} + +func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + atomic.AddInt64(&d.recvBytes, int64(s.Length)) + atomic.AddInt64(&d.recvCount, 1) +} + +func handleRPCEnd(ctx context.Context, s *stats.End) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + return + } + + elapsedTime := time.Since(d.startTime) + + var st string + if s.Error != nil { + s, ok := status.FromError(s.Error) + if ok { + st = statusCodeToString(s) + } + } else { + st = "OK" + } + + latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + if s.Client { + ctx, _ = tag.New(ctx, + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st)) + ocstats.Record(ctx, + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis)) + } else { + ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st)) + ocstats.Record(ctx, + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis)) + } +} + +func statusCodeToString(s *status.Status) string { + // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md + switch c := s.Code(); c { + case codes.OK: + return "OK" + case codes.Canceled: + return "CANCELLED" + case codes.Unknown: + return "UNKNOWN" + case codes.InvalidArgument: + return "INVALID_ARGUMENT" + case codes.DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case codes.NotFound: + return "NOT_FOUND" + case codes.AlreadyExists: + return "ALREADY_EXISTS" + case codes.PermissionDenied: + return "PERMISSION_DENIED" + case codes.ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case codes.FailedPrecondition: + return "FAILED_PRECONDITION" + case codes.Aborted: + return "ABORTED" + case codes.OutOfRange: + return "OUT_OF_RANGE" + case codes.Unimplemented: + return "UNIMPLEMENTED" + case codes.Internal: + return "INTERNAL" + case codes.Unavailable: + return "UNAVAILABLE" + case codes.DataLoss: + return "DATA_LOSS" + case codes.Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE_" + strconv.FormatInt(int64(c), 10) + } +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go new file 
mode 100644 index 000000000..720f381c2 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -0,0 +1,107 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc + +import ( + "strings" + + "google.golang.org/grpc/codes" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +const traceContextKey = "grpc-trace-bin" + +// TagRPC creates a new trace span for the client side of the RPC. +// +// It returns ctx with the new trace span added and a serialization of the +// SpanContext added to the outgoing gRPC metadata. +func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + ctx, span := trace.StartSpan(ctx, name, + trace.WithSampler(c.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC + traceContextBinary := propagation.Binary(span.SpanContext()) + return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) +} + +// TagRPC creates a new trace span for the server side of the RPC. +// +// It checks the incoming gRPC metadata in ctx for a SpanContext, and if +// it finds one, uses that SpanContext as the parent context of the new span. +// +// It returns ctx, with the new trace span added. +func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + md, _ := metadata.FromIncomingContext(ctx) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler), + ) + return ctx + } + } + ctx, span := trace.StartSpan(ctx, name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithSampler(s.StartOptions.Sampler)) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return ctx +} + +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { + span := trace.FromContext(ctx) + // TODO: compressed and uncompressed sizes are not populated in every message. 
+ switch rs := rs.(type) { + case *stats.Begin: + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast)) + case *stats.InPayload: + span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) + case *stats.OutPayload: + span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) + case *stats.End: + if rs.Error != nil { + s, ok := status.FromError(rs.Error) + if ok { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) + } + } + span.End() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..180792106 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,97 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + // TODO: remove excessive nesting of http.RoundTrippers here. 
+ format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: t.StartOptions.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..9b286b929 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,125 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(Host, req.URL.Host), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = track + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. 
+func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + m := []stats.Measurement{ + ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) + stats.Record(ctx, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + switch err { + case nil: + t.respSize += int64(n) + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 000000000..f777772ec --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. 
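An illustrative round trip through this format; the URL and span name are placeholders:

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

func main() {
	f := &b3.HTTPFormat{}
	req, _ := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL

	// Outgoing: write the span context into X-B3-TraceId, X-B3-SpanId
	// and X-B3-Sampled.
	_, span := trace.StartSpan(req.Context(), "example.Span")
	defer span.End()
	f.SpanContextToRequest(span.SpanContext(), req)
	fmt.Println(req.Header.Get("X-B3-TraceId"))

	// Incoming: the receiving side parses the same headers back into a
	// SpanContext to parent its server-side span.
	if sc, ok := f.SpanContextFromRequest(req); ok {
		fmt.Println(sc.TraceID, sc.SpanID)
	}
}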
+package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
+
+import (
+	"encoding/hex"
+	"net/http"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+// B3 headers that OpenCensus understands.
+const (
+	TraceIDHeader = "X-B3-TraceId"
+	SpanIDHeader  = "X-B3-SpanId"
+	SampledHeader = "X-B3-Sampled"
+)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers in B3 propagation format.
+// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
+// because there are additional fields not represented in the
+// OpenCensus span context. Spans created from the incoming
+// header will be the direct children of the client-side span.
+// Similarly, the receiver of the outgoing spans should use the
+// client-side span created by OpenCensus as the parent.
+type HTTPFormat struct{}
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// SpanContextFromRequest extracts a B3 span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
+	if !ok {
+		return trace.SpanContext{}, false
+	}
+	sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
+	if !ok {
+		return trace.SpanContext{}, false
+	}
+	sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
+	return trace.SpanContext{
+		TraceID:      tid,
+		SpanID:       sid,
+		TraceOptions: sampled,
+	}, true
+}
+
+// ParseTraceID parses the value of the X-B3-TraceId header.
+func ParseTraceID(tid string) (trace.TraceID, bool) {
+	if tid == "" {
+		return trace.TraceID{}, false
+	}
+	b, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.TraceID{}, false
+	}
+	var traceID trace.TraceID
+	if len(b) <= 8 {
+		// The lower 64-bits.
+		start := 8 + (8 - len(b))
+		copy(traceID[start:], b)
+	} else {
+		start := 16 - len(b)
+		copy(traceID[start:], b)
+	}
+
+	return traceID, true
+}
+
+// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
+func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
+	if sid == "" {
+		return trace.SpanID{}, false
+	}
+	b, err := hex.DecodeString(sid)
+	if err != nil {
+		return trace.SpanID{}, false
+	}
+	start := 8 - len(b)
+	copy(spanID[start:], b)
+	return spanID, true
+}
+
+// ParseSampled parses the value of the X-B3-Sampled header.
+func ParseSampled(sampled string) (trace.TraceOptions, bool) {
+	switch sampled {
+	case "true", "1":
+		return trace.TraceOptions(1), true
+	default:
+		return trace.TraceOptions(0), false
+	}
+}
+
+// SpanContextToRequest modifies the given request to include B3 headers.
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+	req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
+	req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
+
+	var sampled string
+	if sc.IsSampled() {
+		sampled = "1"
+	} else {
+		sampled = "0"
+	}
+	req.Header.Set(SampledHeader, sampled)
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
new file mode 100644
index 000000000..fe2a6eb58
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -0,0 +1,230 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "bufio" + "context" + "errors" + "net" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var traceEnd, statsEnd func() + r, traceEnd = h.startTrace(w, r) + defer traceEnd() + w, statsEnd = h.startStats(w, r) + defer statsEnd() + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(h.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(h.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeChild, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) 
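A minimal server sketch (route and port are placeholders) showing the span being read back from the request context as described above:

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		// The server span started by ochttp.Handler is on the request
		// context and ends automatically when ServeHTTP returns.
		span := trace.FromContext(r.Context())
		span.AddAttributes(trace.StringAttribute("greeting", "hello"))
		fmt.Fprintln(w, "hello")
	})
	// Port is a placeholder.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}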
+ return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.URL.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track, track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertions for widely used net/http interfaces +var _ http.CloseNotifier = (*trackingResponseWriter)(nil) +var _ http.Flusher = (*trackingResponseWriter)(nil) +var _ http.Hijacker = (*trackingResponseWriter)(nil) +var _ http.Pusher = (*trackingResponseWriter)(nil) +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker") + +func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := t.writer.(http.Hijacker) + if !ok { + return nil, nil, errHijackerUnimplemented + } + return hj.Hijack() +} + +func (t *trackingResponseWriter) CloseNotify() <-chan bool { + cn, ok := t.writer.(http.CloseNotifier) + if !ok { + return nil + } + return cn.CloseNotify() +} + +func (t *trackingResponseWriter) Push(target string, opts *http.PushOptions) error { + pusher, ok := t.writer.(http.Pusher) + if !ok { + return http.ErrNotSupported + } + return pusher.Push(target, opts) +} + +func (t *trackingResponseWriter) end() { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) + stats.Record(ctx, m...) + }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +func (t *trackingResponseWriter) Flush() { + if flusher, ok := t.writer.(http.Flusher); ok { + flusher.Flush() + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 000000000..19a882500 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,181 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// The following client HTTP measures are supported for use in custom views. +var ( + ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") +) + +// Default distributions used by views in this package. +var ( + DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views. 
+// You need to register the views for data to actually be collected. +var ( + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientLatency, + Aggregation: DefaultLatencyDistribution, + } + + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientLatency, + Aggregation: view.Count(), + } + + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. 
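As noted above, none of these views collect data until they are registered; a minimal sketch:

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Without registration no data is collected for these views.
	if err := view.Register(ochttp.DefaultServerViews...); err != nil {
		log.Fatalf("failed to register views: %v", err)
	}
	// Individual views can be registered the same way, e.g.:
	//   view.Register(ochttp.ServerLatencyView)
}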
+var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..ea066a2c6 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,199 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" + "net/http" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + _, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + req = req.WithContext(trace.WithSpan(req.Context(), span)) + if t.format != nil { + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + resp.Body = &bodyTracker{rc: resp.Body, span: span} + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. 
+type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + return []trace.Attribute{ + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(MethodAttribute, r.Method), + trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + } +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. 
+ code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `"OK"`, + trace.StatusCodeCancelled: `"CANCELLED"`, + trace.StatusCodeUnknown: `"UNKNOWN"`, + trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`, + trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, + trace.StatusCodeNotFound: `"NOT_FOUND"`, + trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`, + trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`, + trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`, + trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`, + trace.StatusCodeAborted: `"ABORTED"`, + trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`, + trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`, + trace.StatusCodeInternal: `"INTERNAL"`, + trace.StatusCodeUnavailable: `"UNAVAILABLE"`, + trace.StatusCodeDataLoss: `"DATA_LOSS"`, + trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..1eb190a96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. 
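A small sketch that feeds the documented example value through FromBinary and back through Binary:

package main

import (
	"fmt"

	"go.opencensus.io/trace/propagation"
)

func main() {
	// The "valid value example" from the comment above: version 0, a
	// 16-byte trace_id, an 8-byte span_id, and trace_options 0x01.
	b := []byte{
		0, // version_id
		0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, // field 0: trace_id
		1, 97, 98, 99, 100, 101, 102, 103, 104, // field 1: span_id
		2, 1, // field 2: trace_options (sampled)
	}
	sc, ok := propagation.FromBinary(b)
	fmt.Println(ok, sc.TraceID, sc.SpanID, sc.TraceOptions.IsSampled())

	// Binary is the inverse; a round trip reproduces the 29 bytes.
	fmt.Printf("%v\n", propagation.Binary(sc))
}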
+func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index d3681ab42..a3c021d3f 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -5,6 +5,8 @@ // Package context defines the Context type, which carries deadlines, // cancelation signals, and other request-scoped values across API boundaries // and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. // // Incoming requests to a server should create a Context, and outgoing calls to // servers should accept a Context. The chain of function calls between must diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..606cf1f97 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. 
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 000000000..926870cc2 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. + cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. 
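Typical usage of this package, sketched with a placeholder URL; the context bounds the whole request, and a nil client falls back to http.DefaultClient:

package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// A one-second budget for the whole request; on expiry, Do returns
	// ctx.Err() rather than a transport error.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}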
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 000000000..e6cd0ced3 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go similarity index 97% rename from vendor/golang.org/x/net/lex/httplex/httplex.go rename to vendor/golang.org/x/net/http/httpguts/httplex.go index 20f2b8940..e7de24ee6 100644 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -2,12 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. -package httplex +package httpguts import ( "net" diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go index 698860b77..c9a0cf3b4 100644 --- a/vendor/golang.org/x/net/http2/ciphers.go +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -5,7 +5,7 @@ package http2 // A list of the possible cipher suite ids. Taken from -// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt const ( cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index bdf5652b0..f4d9b5ece 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -52,9 +52,31 @@ const ( noDialOnMiss = false ) +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. 
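The GetConn hook referenced above is installed through net/http/httptrace; a minimal caller-side sketch (the URL is a placeholder):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	trace := &httptrace.ClientTrace{
		// With the logic above, this fires exactly once per request,
		// whether net/http or this package dials the connection.
		GetConn: func(hostPort string) { fmt.Println("GetConn:", hostPort) },
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if _, err := http.DefaultClient.Do(req); err != nil {
		log.Fatal(err)
	}
}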
+ traceGetConn(req, addr) const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { @@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis } p.mu.Lock() for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } p.mu.Unlock() return cc, nil } @@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis p.mu.Unlock() return nil, ErrNoCachedConn } + traceGetConn(req, addr) call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index b65fc6d42..6356b3287 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { // registerHTTPSProtocol calls Transport.RegisterProtocol but // converting panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) @@ -69,11 +69,13 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { + res, err := rt.Transport.RoundTrip(req) + if isNoCachedConnError(err) { return nil, http.ErrSkipAltProtocol } return res, err diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 957de2542..cea601fcd 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -41,10 +41,10 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. 
func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true } - f.n += n - return true + return false } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 3b1489072..c85e31f26 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -14,8 +14,8 @@ import ( "strings" "sync" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -733,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool { return f.FrameHeader.Flags.Has(FlagSettingsAck) } -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true } - buf = buf[6:] } return 0, false } +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + // ForeachSetting runs fn for each setting. // It stops and returns the first error. func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { return err } - buf = buf[6:] } return nil } @@ -1462,7 +1497,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if VerboseLogs && fr.logReads { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } - if !httplex.ValidHeaderFieldValue(hf.Value) { + if !httpguts.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..9749dc0be --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *clientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go index 47b7fae08..d957b7bc5 100644 --- a/vendor/golang.org/x/net/http2/go17.go +++ b/vendor/golang.org/x/net/http2/go17.go @@ -18,6 +18,8 @@ type contextContext interface { context.Context } +var errCanceled = context.Canceled + func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { ctx, cancel = context.WithCancel(context.Background()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) @@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration { func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + func traceGotConn(req *http.Request, cc *ClientConn) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { @@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace { func (cc *ClientConn) Ping(ctx context.Context) error { return cc.ping(ctx) } + +// Shutdown gracefully closes the client connection, waiting for running streams to complete. 
+func (cc *ClientConn) Shutdown(ctx context.Context) error { + return cc.shutdown(ctx) +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index c2805f6ac..c3ff3fa1c 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -7,15 +7,21 @@ package http2 import ( "net/http" "strings" + "sync" ) var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case ) -func init() { - for _, v := range []string{ +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ "accept", "accept-charset", "accept-encoding", @@ -63,7 +69,10 @@ func init() { "vary", "via", "www-authenticate", - } { + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { chk := http.CanonicalHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk @@ -71,6 +80,7 @@ func init() { } func lowerHeader(v string) string { + buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s } diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index 54726c2a3..1565cf270 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -206,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte { } // appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. +// representation, to dst and returns the extended buffer. // // s will be encoded in Huffman codes only when it produces strictly // shorter byte string. diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 176644acd..166788cee 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -389,6 +389,12 @@ func (d *Decoder) callEmit(hf HeaderField) error { // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. + if d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index 8850e3946..b412a96c5 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -47,6 +47,7 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // If maxLen is greater than 0, attempts to write more to buf than // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + rootHuffmanNode := getRootHuffmanNode() n := rootHuffmanNode // cur is the bit buffer that has not been fed into n. // cbits is the number of low order bits in cur that are valid. 
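Both conversions above replace package-init work with the same sync.Once lazy-build pattern; a self-contained sketch of the pattern, with invented names:

package main

import (
	"fmt"
	"sync"
)

var (
	tableOnce sync.Once
	table     map[string]int
)

// getTable builds the lookup table on first use instead of at package
// init time, exactly once even under concurrent callers.
func getTable() map[string]int {
	tableOnce.Do(func() {
		table = make(map[string]int)
		for i, v := range []string{"accept", "content-type", "user-agent"} {
			table[v] = i
		}
	})
	return table
}

func main() {
	fmt.Println(getTable()["content-type"])
}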
@@ -106,7 +107,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { type node struct { // children is non-nil for internal nodes - children []*node + children *[256]*node // The following are only valid if children is nil: codeLen uint8 // number of bits that led to the output of sym @@ -114,22 +115,31 @@ type node struct { } func newInternalNode() *node { - return &node{children: make([]*node, 256)} + return &node{children: new([256]*node)} } -var rootHuffmanNode = newInternalNode() +var ( + buildRootOnce sync.Once + lazyRootHuffmanNode *node +) -func init() { +func getRootHuffmanNode() *node { + buildRootOnce.Do(buildRootHuffmanNode) + return lazyRootHuffmanNode +} + +func buildRootHuffmanNode() { if len(huffmanCodes) != 256 { panic("unexpected size") } + lazyRootHuffmanNode = newInternalNode() for i, code := range huffmanCodes { addDecoderNode(byte(i), code, huffmanCodeLen[i]) } } func addDecoderNode(sym byte, code uint32, codeLen uint8) { - cur := rootHuffmanNode + cur := lazyRootHuffmanNode for codeLen > 8 { codeLen -= 8 i := uint8(code >> codeLen) diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index d565f40e0..bdaba1d46 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -29,7 +29,7 @@ import ( "strings" "sync" - "golang.org/x/net/lex/httplex" + "golang.org/x/net/http/httpguts" ) var ( @@ -179,7 +179,7 @@ var ( ) // validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. +// name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII @@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool { return false } for _, r := range v { - if !httplex.IsTokenRune(r) { + if !httpguts.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { @@ -201,19 +201,12 @@ func validWireHeaderFieldName(v string) bool { return true } -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s + switch code { + case 200: + return "200" + case 404: + return "404" } return strconv.Itoa(code) } @@ -312,7 +305,7 @@ func mustUint31(v int32) uint32 { } // bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. +// permits a body. See RFC 7230, section 3.3. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..0df34e6d9 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { return false } + +func traceWroteHeaderField(trace *clientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go index 140434a79..7ffb25046 100644 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -8,6 +8,7 @@ package http2 import ( "crypto/tls" + "errors" "net" "net/http" "time" @@ -18,6 +19,8 @@ type contextContext interface { Err() error } +var errCanceled = errors.New("canceled") + type fakeContext struct{} func (fakeContext) Done() <-chan struct{} { return nil } @@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) { type clientTrace struct{} func requestTrace(*http.Request) *clientTrace { return nil } +func traceGetConn(*http.Request, string) {} func traceGotConn(*http.Request, *ClientConn) {} func traceFirstResponseByte(*clientTrace) {} func traceWroteHeaders(*clientTrace) {} @@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error { return cc.ping(ctx) } +func (cc *ClientConn) Shutdown(ctx contextContext) error { + return cc.shutdown(ctx) +} + func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index eae143ddf..56859d1f8 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -46,6 +46,7 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" ) @@ -220,12 +221,15 @@ func ConfigureServer(s *http.Server, conf *Server) error { } else if s.TLSConfig.CipherSuites != nil { // If they already provided a CipherSuite list, return // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. haveRequired := false sawBad := false for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: haveRequired = true } if isBadCipher(cs) { @@ -235,7 +239,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") } } @@ -403,7 +407,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // addresses during development. // // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? + // a new request, and verify the ServerName matches the :authority? // But that precludes proxy situations, perhaps. // // So for now, do nothing here again. 
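
The ConfigureServer hunk above relaxes the cipher-suite check: either MTI variant, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, now satisfies the HTTP/2 requirement. A hedged sketch of a configuration this newly permits; the address and certificate paths are placeholders.

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443", // placeholder address
		TLSConfig: &tls.Config{
			// An ECDSA-only suite list: before this change ConfigureServer
			// rejected it for lacking the RSA variant of the MTI cipher.
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			},
		},
	}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder files
}
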
@@ -649,7 +653,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -659,6 +663,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() + buildCommonHeaderMapsOnce() cv, ok := commonCanonHeader[v] if ok { return cv @@ -853,8 +858,13 @@ func (sc *serverConn) serve() { } } - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. + sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) } } } @@ -889,8 +899,11 @@ func (sc *serverConn) sendServeMsg(msg interface{}) { } } -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. func (sc *serverConn) readPreface() error { errc := make(chan error, 1) go func() { @@ -908,7 +921,7 @@ func (sc *serverConn) readPreface() error { defer timer.Stop() select { case <-timer.C: - return errors.New("timeout waiting for client preface") + return errPrefaceTimeout case err := <-errc: if err == nil { if VerboseLogs { @@ -1218,30 +1231,31 @@ func (sc *serverConn) startGracefulShutdown() { sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) } +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? 
+var goAwayTimeout = 1 * time.Second + func (sc *serverConn) startGracefulShutdownInternal() { - sc.goAwayIn(ErrCodeNo, 0) + sc.goAway(ErrCodeNo) } func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) -} - -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { sc.serveG.check() if sc.inGoAway { return } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code @@ -1474,6 +1488,12 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { } return nil } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } @@ -1562,6 +1582,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in + // "open" or "half-closed (local)" state, the recipient MUST respond with a + // stream error (Section 5.4.2) of type STREAM_CLOSED. + if state == stateClosed { + return streamError(id, ErrCodeStreamClosed) + } if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -1595,7 +1621,10 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) } if f.Length > 0 { // Check whether the client has flow control quota. @@ -1705,6 +1734,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // processing this frame. return nil } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } return st.processTrailerHeaders(f) } @@ -1805,7 +1841,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { + if !httpguts.ValidTrailerHeader(key) { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. @@ -2272,8 +2308,8 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. 
func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. rws.conn.logf("ignoring invalid trailer %q", k) return } @@ -2310,7 +2346,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { ctype = http.DetectContentType(p) } var date string @@ -2323,6 +2359,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { foreachHeaderElement(v, rws.declareTrailer) } + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, @@ -2394,7 +2443,7 @@ const TrailerPrefix = "Trailer:" // after the header has already been flushed. Because the Go // ResponseWriter interface has no way to set Trailers (only the // Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 +// interface, and because nobody used trailers, and because RFC 7230 // says you SHOULD (but not must) predeclare any trailers in the // header, the official ResponseWriter rules said trailers in Go must // be predeclared, and then we reuse the same ResponseWriter.Header() @@ -2478,6 +2527,24 @@ func (w *responseWriter) Header() http.Header { return rws.handlerHeader } +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) 
+ if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + func (w *responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { @@ -2488,6 +2555,7 @@ func (w *responseWriter) WriteHeader(code int) { func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { + checkWriteHeaderCode(code) rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { @@ -2759,7 +2827,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { } // foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. +// in RFC 7230 section 7 and calls fn for each non-empty element. func foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { @@ -2807,41 +2875,6 @@ func new400Handler(err error) http.HandlerFunc { } } -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index e0dfe9f6a..9d1f2fadd 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -21,15 +21,16 @@ import ( mathrand "math/rand" "net" "net/http" + "net/textproto" "sort" "strconv" "strings" "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" ) const ( @@ -87,7 +88,7 @@ type Transport struct { // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here + // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). 
If you actually // want to advertise an ulimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) @@ -159,6 +160,7 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + closing bool closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received @@ -172,9 +174,10 @@ type ClientConn struct { fr *Framer lastActive time.Time // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder @@ -210,9 +213,10 @@ type clientStream struct { done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -236,6 +240,17 @@ func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { } } +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + // awaitRequestCancel waits for the user to cancel a request, its context to // expire, or for the request to be done (any way it might be removed from the // cc.streams map: peer reset, successful completion, TCP connection breakage, @@ -273,6 +288,13 @@ func (cs *clientStream) checkResetOrDone() error { } } +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + func (cs *clientStream) abortRequestBodyWrite(err error) { if err == nil { panic("nil error") @@ -298,7 +320,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } -var ErrNoCachedConn = errors.New("http2: no cached connection was available") +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. 
Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} // RoundTripOpt are options for the Transport.RoundTripOpt method. type RoundTripOpt struct { @@ -348,14 +389,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res return nil, err } traceGotConn(req, cc) - res, err := cc.RoundTrip(req) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) if err != nil && retry <= 6 { - afterBodyWrite := false - if e, ok := err.(afterReqBodyWriteError); ok { - err = e - afterBodyWrite = true - } - if req, err = shouldRetryRequest(req, err, afterBodyWrite); err == nil { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { // After the first retry, do exponential backoff with 10% jitter. if retry == 0 { continue @@ -393,16 +429,6 @@ var ( errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) -// afterReqBodyWriteError is a wrapper around errors returned by ClientConn.RoundTrip. -// It is used to signal that err happened after part of Request.Body was sent to the server. -type afterReqBodyWriteError struct { - err error -} - -func (e afterReqBodyWriteError) Error() string { - return e.err.Error() + "; some request body already written" -} - // shouldRetryRequest is called by RoundTrip when a request fails to get // response headers. It is always called with a non-nil error. // It returns either a request to retry (either the same request, or a @@ -411,27 +437,36 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt if !canRetryError(err) { return nil, err } - if !afterBodyWrite { - return req, nil - } // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. if req.Body == nil || reqBodyIsNoBody(req.Body) { return req, nil } - // Otherwise we depend on the Request having its GetBody - // func defined. + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + if getBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil } - body, err := getBody() - if err != nil { - return nil, err + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. 
+ if !afterBodyWrite { + return req, nil } - newReq := *req - newReq.Body = body - return &newReq, nil + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) } func canRetryError(err error) bool { @@ -519,17 +554,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d @@ -554,6 +590,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + if t.AllowHTTP { + cc.nextStreamID = 3 + } + if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -613,12 +653,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool { return cc.canTakeNewRequestLocked() } -func (cc *ClientConn) canTakeNewRequestLocked() bool { +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { - return false + return } - return cc.goAway == nil && !cc.closed && + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -648,6 +708,88 @@ func (cc *ClientConn) closeIfIdle() { cc.tconn.Close() } +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. 
+// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) shutdown(ctx contextContext) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. 
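
The Shutdown/sendGoAway/Close additions above give ClientConn a graceful teardown path: send GOAWAY, stop accepting new requests, wait for open streams to drain, and keep Close as the abrupt fallback. A rough usage sketch under the Go >= 1.7 build follows; the dialed host and the timeout are illustrative assumptions.

package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	// Dial a TLS connection negotiating h2; the host is a placeholder.
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		NextProtos: []string{"h2"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tr := &http2.Transport{}
	cc, err := tr.NewClientConn(conn)
	if err != nil {
		log.Fatal(err)
	}

	// ... issue requests via cc.RoundTrip here ...

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		// Graceful drain timed out or failed; interrupt in-flight streams.
		_ = cc.Close()
	}
}
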
@@ -730,7 +872,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -750,8 +892,13 @@ func actualContentLength(req *http.Request) int64 { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { if err := checkConnHeaders(req); err != nil { - return nil, err + return nil, false, err } if cc.idleTimer != nil { cc.idleTimer.Stop() @@ -759,14 +906,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { trailers, err := commaSeparatedTrailers(req) if err != nil { - return nil, err + return nil, false, err } hasTrailers := trailers != "" cc.mu.Lock() if err := cc.awaitOpenSlotForRequest(req); err != nil { cc.mu.Unlock() - return nil, err + return nil, false, err } body := req.Body @@ -800,7 +947,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) if err != nil { cc.mu.Unlock() - return nil, err + return nil, false, err } cs := cc.newStream() @@ -812,7 +959,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cc.wmu.Lock() endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) cc.wmu.Unlock() traceWroteHeaders(cs.trace) cc.mu.Unlock() @@ -826,7 +973,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // Don't bother sending a RST_STREAM (our write already failed; // no need to keep writing) traceWroteRequest(cs.trace, werr) - return nil, werr + return nil, false, werr } var respHeaderTimer <-chan time.Time @@ -845,7 +992,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { bodyWritten := false ctx := reqContext(req) - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { res := re.res if re.err != nil || res.StatusCode > 299 { // On error or status code 3xx, 4xx, 5xx, etc abort any @@ -861,18 +1008,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWrite) } if re.err != nil { - cc.mu.Lock() - afterBodyWrite := cs.startedWrite - cc.mu.Unlock() cc.forgetStreamID(cs.ID) - if afterBodyWrite { - return nil, afterReqBodyWriteError{re.err} - } - return nil, re.err + return nil, cs.getStartedWrite(), re.err } res.Request = req res.TLS = cc.tlsState - return res, nil + return res, false, nil } for { @@ -887,7 +1028,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, errTimeout + return nil, cs.getStartedWrite(), errTimeout case <-ctx.Done(): if 
!hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) @@ -896,7 +1037,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, ctx.Err() + return nil, cs.getStartedWrite(), ctx.Err() case <-req.Cancel: if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) @@ -905,12 +1046,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } cc.forgetStreamID(cs.ID) - return nil, errRequestCanceled + return nil, cs.getStartedWrite(), errRequestCanceled case <-cs.peerReset: // processResetStream already removed the // stream from the streams map; no need for // forgetStreamID. - return nil, cs.resetErr + return nil, cs.getStartedWrite(), cs.resetErr case err := <-bodyWriter.resc: // Prefer the read loop's response, if available. Issue 16102. select { @@ -919,7 +1060,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { default: } if err != nil { - return nil, err + return nil, cs.getStartedWrite(), err } bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { @@ -939,6 +1080,9 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { for { cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } return errClientConnUnusable } if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { @@ -971,13 +1115,12 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { } // requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 @@ -1085,17 +1228,26 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( var trls []byte if hasTrailers { cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } } + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + cc.wmu.Lock() defer cc.wmu.Unlock() // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) } else { err = cc.fr.WriteData(cs.ID, true, nil) } @@ -1154,7 +1306,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if host == "" { host = req.URL.Host } - host, err := httplex.PunycodeHostPort(host) + host, err := httpguts.PunycodeHostPort(host) if err != nil { return nil, err } @@ -1179,72 +1331,103 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // potentially pollute our hpack state. 
(We want to be able to // continue to reuse the hpack encoder for future requests) for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } } - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } } - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := requestTrace(req) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } + }) + return cc.hbuf.Bytes(), nil } @@ -1271,17 +1454,29 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { } // requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { cc.hbuf.Reset() + + hlSize := uint64(0) for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the // start of RoundTrip lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } } - return cc.hbuf.Bytes() + return cc.hbuf.Bytes(), nil } func (cc *ClientConn) writeHeader(name, value string) { @@ -1339,17 +1534,12 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. 
func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - + rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() if ce, ok := cc.readerErr.(ConnectionError); ok { @@ -1404,10 +1594,8 @@ func (rl *clientConnReadLoop) cleanup() { } else if err == io.EOF { err = io.ErrUnexpectedEOF } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed select { case cs.resc <- resAndError{err: err}: default: @@ -1485,7 +1673,7 @@ func (rl *clientConnReadLoop) run() error { } return err } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + if rl.closeWhenIdle && gotReply && maybeIdle { cc.closeIfIdle() } } @@ -1493,13 +1681,31 @@ func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) + cs := cc.streamByID(f.StreamID, false) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this // was just something we canceled, ignore it. return nil } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } if !cs.firstByte { if cs.trace != nil { // TODO(bradfitz): move first response byte earlier, @@ -1523,6 +1729,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { } // Any other error type is a stream error. cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) cs.resc <- resAndError{err: err} return nil // return nil from process* funcs to keep conn alive } @@ -1530,9 +1737,6 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // (nil, nil) special case. See handleResponse docs. return nil } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } cs.resTrailer = &res.Trailer cs.resc <- resAndError{res: res} return nil @@ -1543,8 +1747,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. +// frame (currently only used for 1xx responses). 
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { if f.Truncated { return nil, errResponseHeaderListSize @@ -1552,20 +1755,11 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra status := f.PseudoValue("status") if status == "" { - return nil, errors.New("missing status pseudo header") + return nil, errors.New("malformed response from server: missing status pseudo header") } statusCode, err := strconv.Atoi(status) if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } header := make(http.Header) @@ -1592,6 +1786,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } } + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { @@ -1789,7 +2004,23 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } return nil } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } // Check connection-level flow control. 
cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { @@ -1851,11 +2082,10 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { err = io.EOF code = cs.copyTrailers } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } + cs.bufPipe.closeWithErrorAndCode(err, code) select { case cs.resc <- resAndError{err: err}: @@ -1903,6 +2133,8 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) case SettingInitialWindowSize: // Values above the maximum flow-control // window size of 2^31-1 MUST be treated as a @@ -1980,7 +2212,6 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { cs.bufPipe.CloseWithError(err) cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } - delete(rl.activeRes, cs.ID) return nil } @@ -2069,6 +2300,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) @@ -2162,7 +2394,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body } s.delay = t.expectContinueTimeout() if s.delay == 0 || - !httplex.HeaderValuesContainsToken( + !httpguts.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return @@ -2217,5 +2449,5 @@ func (s bodyWriterState) scheduleBodyWrite() { // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") } diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6b0dfae31..fae5c8adc 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -10,10 +10,9 @@ import ( "log" "net/http" "net/url" - "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. @@ -90,11 +89,7 @@ type writeGoAway struct { func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } + ctx.Flush() // ignore error: we're hanging up on them anyway return err } @@ -204,7 +199,7 @@ func (w *writeResHeaders) staysWithinBuffer(max int) bool { // TODO: this is a common one. It'd be nice to return true // here and get into the fast path if we could be clever and // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and + // upper bound that usually fires. (Maybe if w.h and // w.trailers are nil, so we don't need to enumerate it.) 
// Otherwise I'm afraid that just calculating the length to // answer this question would be slower than the ~2µs benefit. @@ -355,7 +350,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } isTE := k == "transfer-encoding" for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { // TODO: return an error? golang.org/issue/14048 // For now just omit it. continue diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go index eb2473507..346fe4423 100644 --- a/vendor/golang.org/x/net/idna/idna.go +++ b/vendor/golang.org/x/net/idna/idna.go @@ -21,6 +21,7 @@ import ( "unicode/utf8" "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" "golang.org/x/text/unicode/norm" ) @@ -68,7 +69,7 @@ func VerifyDNSLength(verify bool) Option { } // RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002, are removed as well. +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. // // This is the behavior suggested by the UTS #46 and is adopted by some // browsers. @@ -92,7 +93,7 @@ func ValidateLabels(enable bool) Option { } } -// StrictDomainName limits the set of permissable ASCII characters to those +// StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration. // @@ -142,7 +143,6 @@ func MapForLookup() Option { o.mapping = validateAndMap StrictDomainName(true)(o) ValidateLabels(true)(o) - RemoveLeadingDots(true)(o) } } @@ -160,14 +160,14 @@ type options struct { // mapping implements a validation and mapping step as defined in RFC 5895 // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (string, error) + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) // bidirule, if specified, checks whether s conforms to the Bidi Rule // defined in RFC 5893. bidirule func(s string) bool } -// A Profile defines the configuration of a IDNA mapper. +// A Profile defines the configuration of an IDNA mapper. type Profile struct { options } @@ -251,23 +251,21 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, @@ -302,14 +300,16 @@ func (e runeError) Error() string { // see http://www.unicode.org/reports/tr46. func (p *Profile) process(s string, toASCII bool) (string, error) { var err error + var isBidi bool if p.mapping != nil { - s, err = p.mapping(p, s) + s, isBidi, err = p.mapping(p, s) } // Remove leading empty labels. 
if p.removeLeadingDots { for ; len(s) > 0 && s[0] == '.'; s = s[1:] { } } + // TODO: allow for a quick check of the tables data. // It seems like we should only create this error on ToASCII, but the // UTS 46 conformance tests suggests we should always check this. if err == nil && p.verifyDNSLength && s == "" { @@ -335,6 +335,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { // Spec says keep the old label. continue } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight labels.set(u) if err == nil && p.validateLabels { err = p.fromPuny(p, u) @@ -349,6 +350,14 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { err = p.validateLabel(label) } } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } if toASCII { for labels.reset(); !labels.done(); labels.next() { label := labels.label() @@ -380,16 +389,26 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { return s, err } -func normalize(p *Profile, s string) (string, error) { - return norm.NFC.String(s), nil +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil } -func validateRegistration(p *Profile, s string) (string, error) { +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. if !norm.NFC.IsNormalString(s) { - return s, &labelError{s, "V1"} + return s, false, &labelError{s, "V1"} } for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) // Copy bytes not copied so far. switch p.simplify(info(v).category()) { // TODO: handle the NV8 defined in the Unicode idna data set to allow @@ -397,21 +416,50 @@ func validateRegistration(p *Profile, s string) (string, error) { case valid, deviation: case disallowed, mapped, unknown, ignored: r, _ := utf8.DecodeRuneInString(s[i:]) - return s, runeError(r) + return s, bidi, runeError(r) } i += sz } - return s, nil + return s, bidi, nil } -func validateAndMap(p *Profile, s string) (string, error) { +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { var ( - err error - b []byte - k int + b []byte + k int ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) 
+ k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) start := i i += sz // Copy bytes not copied so far. @@ -438,7 +486,9 @@ func validateAndMap(p *Profile, s string) (string, error) { } if k == 0 { // No changes so far. - s = norm.NFC.String(s) + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } } else { b = append(b, s[k:]...) if norm.NFC.QuickSpan(b) != len(b) { @@ -447,7 +497,7 @@ func validateAndMap(p *Profile, s string) (string, error) { // TODO: the punycode converters require strings as input. s = string(b) } - return s, err + return s, bidi, err } // A labelIter allows iterating over domain name labels. @@ -542,8 +592,13 @@ func validateFromPunycode(p *Profile, s string) error { if !norm.NFC.IsNormalString(s) { return &labelError{s, "V1"} } + // TODO: detect whether string may have to be normalized in the following + // loop. for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } if c := p.simplify(info(v).category()); c != valid && c != deviation { return &labelError{s, "V6"} } @@ -616,16 +671,13 @@ var joinStates = [][numJoinTypes]joinState{ // validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are // already implicitly satisfied by the overall implementation. -func (p *Profile) validateLabel(s string) error { +func (p *Profile) validateLabel(s string) (err error) { if s == "" { if p.verifyDNSLength { return &labelError{s, "A4"} } return nil } - if p.bidirule != nil && !p.bidirule(s) { - return &labelError{s, "B"} - } if !p.validateLabels { return nil } diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go index d2819345f..f910b2691 100644 --- a/vendor/golang.org/x/net/idna/tables.go +++ b/vendor/golang.org/x/net/idna/tables.go @@ -3,7 +3,7 @@ package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" +const UnicodeVersion = "10.0.0" var mappings string = "" + // Size: 8176 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + @@ -544,7 +544,7 @@ func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e. +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. type idnaTrie struct{} func newIdnaTrie(i int) *idnaTrie { @@ -554,17 +554,17 @@ func newIdnaTrie(i int) *idnaTrie { // lookupValue determines the type of block n and looks up the value for b. func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { switch { - case n < 123: + case n < 125: return uint16(idnaValues[n<<6+uint32(b)]) default: - n -= 123 + n -= 125 return uint16(idnaSparse.lookup(n, b)) } } -// idnaValues: 125 blocks, 8000 entries, 16000 bytes +// idnaValues: 127 blocks, 8128 entries, 16256 bytes // The third block is the zero block. 
-var idnaValues = [8000]uint16{ +var idnaValues = [8128]uint16{ // Block 0x0, offset 0x0 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, @@ -675,14 +675,14 @@ var idnaValues = [8000]uint16{ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, - 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, - 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, - 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, - 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, - 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, - 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, @@ -723,8 +723,8 @@ var idnaValues = [8000]uint16{ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, - 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, @@ -759,129 +759,129 @@ var idnaValues = [8000]uint16{ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, // Block 0x11, offset 0x440 - 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, - 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 
0x1308, 0x451: 0x1308, - 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, - 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, - 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, - 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, - 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, - 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, - 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, - 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, // Block 0x12, offset 0x480 - 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, - 0x486: 0x0408, 0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, - 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, - 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, - 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, - 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, - 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, - 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, - 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, - 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 0x4bb: 0x0208, - 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 
0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, // Block 0x13, offset 0x4c0 - 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, - 0x4c6: 0x0018, 0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, - 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, - 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, - 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, - 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, - 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, - 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, - 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, - 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, - 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, // Block 0x14, offset 0x500 - 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, - 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, - 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, - 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, - 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, - 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, - 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, - 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, - 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, - 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, - 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 
0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, // Block 0x15, offset 0x540 - 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, - 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, - 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, - 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, - 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, - 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, - 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, - 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, - 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, - 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, - 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, // Block 0x16, offset 0x580 - 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, - 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, - 0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, - 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, - 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, - 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, - 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 
0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, - 0x5b6: 0x0018, 0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, - 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, // Block 0x17, offset 0x5c0 - 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, - 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, - 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, - 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, - 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, // Block 0x18, offset 0x600 - 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, - 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, - 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, - 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, - 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, - 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 
0x3308, 0x623: 0x3308, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 0x1308, - 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, - 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, // Block 0x19, offset 0x640 - 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, - 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, - 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0008, 0x677: 0x0008, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, // Block 0x1a, offset 0x680 - 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, - 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, - 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, - 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, - 0x6b6: 0x0040, 0x6b7: 
0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, // Block 0x1b, offset 0x6c0 - 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, - 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, @@ -889,1457 +889,1490 @@ var idnaValues = [8000]uint16{ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, // Block 0x1c, offset 0x700 - 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, - 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, - 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, - 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, - 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0018, 0x731: 0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, - 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, - 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, // Block 0x1d, offset 0x740 - 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, - 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, - 0x74c: 0x0040, 
0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, - 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, - 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, - 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, - 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, - 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, // Block 0x1e, offset 0x780 - 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, - 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 0x0040, 0x78a: 0x1308, 0x78b: 0x1308, - 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, - 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, - 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, - 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, - 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, - 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 
0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, - 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, - 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, - 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, - 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, - 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, - 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, - 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, - 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, // Block 0x20, offset 0x800 - 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, - 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, - 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, - 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, - 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, - 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, - 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, - 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, - 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 
0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, // Block 0x21, offset 0x840 - 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, - 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, - 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, - 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, - 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, - 0x85e: 0x0018, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, - 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, - 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, - 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, // Block 0x22, offset 0x880 - 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, - 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, - 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, - 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, - 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, - 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, - 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, - 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 
0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, // Block 0x23, offset 0x8c0 - 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, - 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, - 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, - 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, - 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, - 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, - 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, - 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, - 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, - 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, // Block 0x24, offset 0x900 - 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, - 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, - 0x90c: 0x0008, 0x90d: 0x1308, 0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, - 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, - 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, - 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, - 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, - 0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, - 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, - 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, - 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 
0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 0x0008, - 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, - 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, - 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, - 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, - 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, // Block 0x26, offset 0x980 - 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, - 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, - 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, - 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, - 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, - 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, - 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 0x1111, 0x9a9: 0x10e1, - 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, - 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 
0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, - 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, - 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, - 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, - 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, - 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, - 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 0x12c9, - 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, - 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, - 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, // Block 0x28, offset 0xa00 - 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, - 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, - 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, - 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, - 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, - 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, - 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, - 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, 
- 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, - 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, - 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 - 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, - 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, - 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, - 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, - 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, - 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, - 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, - 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, - 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, - 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, // Block 0x2a, offset 0xa80 - 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, - 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, - 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, - 0xa92: 0x0008, 0xa93: 
0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, - 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, - 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, - 0xaa4: 0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, - 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, - 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, - 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, - 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, // Block 0x2b, offset 0xac0 - 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, - 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 0x1531, 0xaca: 0x1561, 0xacb: 0x1591, - 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, - 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, - 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, - 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, - 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, - 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, - 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, - 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, - 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 
0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, // Block 0x2c, offset 0xb00 - 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, - 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, - 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, - 0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, - 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, - 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x1c61, + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, - 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, - 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, - 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, - 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 - 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, - 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, - 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, - 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, - 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, - 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, - 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, - 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, - 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, - 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, - 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 
0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, // Block 0x2e, offset 0xb80 - 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, - 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, - 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, - 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, - 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, - 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, - 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, - 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, - 0xbb0: 0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, - 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, - 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, // Block 0x2f, offset 0xbc0 - 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, - 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, - 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, - 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, - 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, - 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, - 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, - 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, - 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 
0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, // Block 0x30, offset 0xc00 - 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, - 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, - 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, - 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, - 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, - 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, - 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, - 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, - 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, - 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, - 0xc3c: 0x2109, 0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, // Block 0x31, offset 0xc40 - 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, - 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, - 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, - 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, - 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, - 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, - 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, - 0xc6a: 
0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, - 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, - 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, - 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, // Block 0x32, offset 0xc80 - 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, - 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, - 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 0xc90: 0x0040, 0xc91: 0x0040, - 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, - 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 0x0040, - 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, - 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, - 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, - 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, - 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, - 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, // Block 0x33, offset 0xcc0 - 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, - 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, - 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 
0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, - 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, - 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, - 0xcde: 0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, - 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, - 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, - 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, - 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, - 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, // Block 0x34, offset 0xd00 - 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 0xd05: 0x0f99, - 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, - 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, - 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, - 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, - 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, - 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, - 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, - 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, - 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, - 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 
0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, // Block 0x35, offset 0xd40 - 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, - 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, - 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, - 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, - 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, - 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, - 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, - 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, - 0xd70: 0x1141, 0xd71: 0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, - 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, - 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, // Block 0x36, offset 0xd80 - 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, - 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, - 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, - 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, - 0xd98: 0xe00d, 0xd99: 0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, - 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, - 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, - 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, - 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, - 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 
0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, // Block 0x37, offset 0xdc0 - 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 0xdc4: 0x277d, 0xdc5: 0x279d, - 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, - 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, - 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, - 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, - 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, - 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, - 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, - 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, - 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, - 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, // Block 0x38, offset 0xe00 - 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, - 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, - 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, - 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, - 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, - 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, - 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, - 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, - 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, - 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, - 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + 
0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, // Block 0x39, offset 0xe40 - 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, - 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, - 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 0xe51: 0x2c5d, - 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, - 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, - 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, - 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, - 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, - 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, - 0xe76: 0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, - 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, // Block 0x3a, offset 0xe80 - 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, - 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, - 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, - 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 0xe96: 0x319d, 0xe97: 0x31bd, - 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, - 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, - 
0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, - 0xeaa: 0x0008, 0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, - 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, - 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, - 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, // Block 0x3b, offset 0xec0 - 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, - 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, - 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, - 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, - 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, - 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, - 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, - 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, - 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, - 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, - 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, // Block 0x3c, offset 0xf00 - 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, - 0xf06: 0x379d, 0xf07: 0x37bd, 
0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, - 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, - 0xf12: 0x391d, 0xf13: 0x393d, 0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, - 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, - 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, - 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, - 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, - 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, - 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, - 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, // Block 0x3d, offset 0xf40 - 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, - 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, - 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, - 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, - 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, - 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, - 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, - 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, - 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, - 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, - 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 
0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, // Block 0x3e, offset 0xf80 - 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, - 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, - 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 0x42a1, 0xf91: 0x42d9, - 0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, - 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, - 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, - 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, - 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, - 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, - 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, - 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, // Block 0x3f, offset 0xfc0 - 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, - 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, - 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, - 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, - 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, - 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, - 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, - 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, - 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, - 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, - 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 
0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, // Block 0x40, offset 0x1000 - 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, - 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, - 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 0x100f: 0x6329, 0x1010: 0x6341, 0x1011: 0x40ed, - 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, - 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, - 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, - 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, - 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, - 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, - 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, - 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, // Block 0x41, offset 0x1040 - 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, - 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, - 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, - 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, - 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, - 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, - 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, - 
0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, - 0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, - 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, - 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, // Block 0x42, offset 0x1080 - 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, - 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, - 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, - 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, - 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, - 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, - 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, - 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, - 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, - 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, - 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, // Block 0x43, offset 0x10c0 
- 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, - 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, - 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, - 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, - 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, - 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, - 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, - 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, - 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, - 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, - 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, // Block 0x44, offset 0x1100 - 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, - 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, - 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, - 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, - 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, - 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, - 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 
0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, - 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, - 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, - 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, - 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, - 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, - 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, - 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, // Block 0x46, offset 0x1180 - 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, - 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, - 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, - 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, - 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, - 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 
0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, - 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, - 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, - 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, // Block 0x47, offset 0x11c0 - 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, - 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, - 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, - 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, - 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, - 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, - 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, - 0x11ea: 0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, - 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, - 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, - 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, // Block 0x48, offset 0x1200 - 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, - 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, - 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, - 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, - 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, - 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, - 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 
0x0040, 0x1228: 0x0040, 0x1229: 0x0040, - 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, - 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, - 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 - 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, - 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, - 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, - 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, - 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, - 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, - 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, - 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, - 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, - 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, - 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, // Block 0x4a, offset 0x1280 - 0x1280: 0x74f1, 0x1281: 
0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, - 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 0x7609, 0x128a: 0x7631, 0x128b: 0x7659, - 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, - 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, - 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, - 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, - 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, - 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, - 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, - 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, - 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, // Block 0x4b, offset 0x12c0 - 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, - 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, - 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, - 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 0x7919, - 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, - 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, - 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, - 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, - 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, - 0x12f6: 0x0018, 0x12f7: 0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, - 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 
0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, // Block 0x4c, offset 0x1300 - 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, - 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, - 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, - 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, - 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, - 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, - 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, - 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, - 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, - 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, - 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, // Block 0x4d, offset 0x1340 - 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, - 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, - 0x134c: 0x87d9, 0x134d: 0x8801, 0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, - 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, - 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 0x135d: 0x8a81, - 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, - 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, - 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 
0x136f: 0x7d89, - 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, - 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, - 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, // Block 0x4e, offset 0x1380 - 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, - 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, - 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, - 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, - 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, - 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, - 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, - 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, - 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, - 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, - 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 
0x84b9, 0x13c5: 0x84e1, - 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, - 0x13cc: 0x8649, 0x13cd: 0x8f89, 0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, - 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, - 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, - 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, - 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, - 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, - 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, - 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, - 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, // Block 0x50, offset 0x1400 - 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, - 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, - 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, - 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, - 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, - 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, - 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, - 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, - 0x1430: 0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, - 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, - 0x143c: 0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 
0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, // Block 0x51, offset 0x1440 - 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, - 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, - 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, - 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, - 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, - 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, - 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, - 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, - 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, - 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, - 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, // Block 0x52, offset 0x1480 - 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, - 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, - 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, - 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, - 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, - 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, - 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, - 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, - 0x14b0: 0xaa61, 0x14b1: 
0xaa99, 0x14b2: 0xaad1, 0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, - 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, - 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, // Block 0x53, offset 0x14c0 - 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, - 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, - 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, - 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, - 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, - 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, - 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, - 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, - 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, - 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, - 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, // Block 0x54, offset 0x1500 - 0x1500: 0x7e95, 0x1501: 0x7eb5, 0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, - 0x1506: 0x0018, 
0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, - 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, - 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, - 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, - 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 0xafda, - 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, - 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, - 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, + 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, // Block 0x55, offset 0x1540 - 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, - 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, - 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, - 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, - 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, - 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, - 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, - 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, - 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, - 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, - 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 0x157f: 0xb2f1, + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 
0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, // Block 0x56, offset 0x1580 - 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, - 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, - 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, - 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, - 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, - 0x159e: 0xb3b1, 0x159f: 0xb3b1, 0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, - 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, - 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, - 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, - 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, - 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, // Block 0x57, offset 0x15c0 - 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, - 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, - 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, - 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, - 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, - 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, - 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, - 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, - 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 
0x0fa9, 0x15f5: 0x0fb9, - 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, - 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, // Block 0x58, offset 0x1600 - 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, - 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, - 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, - 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, - 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, - 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, - 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, - 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, - 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, - 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, - 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, // Block 0x59, offset 0x1640 - 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, - 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 
0x164a: 0x84d5, 0x164b: 0x8595, - 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, - 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, - 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, - 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, - 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, - 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, - 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, - 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, - 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, // Block 0x5a, offset 0x1680 - 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, - 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, - 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, - 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, - 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, - 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, - 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 0x16a9: 0x8a35, - 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, - 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, - 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, - 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 
0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, // Block 0x5b, offset 0x16c0 - 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, - 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, - 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, - 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, - 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, - 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, - 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, - 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, - 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, - 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, - 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 - 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, - 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, - 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, - 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, - 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, - 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, - 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 
0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 - 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, - 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, - 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, - 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, - 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, - 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, - 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, - 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, - 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, - 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, // Block 0x5e, offset 0x1780 - 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, - 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, - 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 
0x1797: 0x1008, - 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, - 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, - 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, - 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 - 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, - 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, - 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, - 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, - 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, - 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, - 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, - 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, - 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, - 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, - 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 
0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, // Block 0x60, offset 0x1800 - 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, - 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, - 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, - 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, - 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, - 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, - 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, - 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, - 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, - 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, - 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, // Block 0x61, offset 0x1840 - 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, - 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, - 0x184c: 0x0f51, 0x184d: 0x0359, 0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, - 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, - 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, - 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, - 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, - 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, - 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, - 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, - 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 
0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, // Block 0x62, offset 0x1880 - 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, - 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, - 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, - 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, - 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, - 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, - 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, - 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, - 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, - 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, - 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, // Block 0x63, offset 0x18c0 - 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, - 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, - 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, - 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, - 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, - 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, - 
0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, - 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, - 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, - 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, - 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, // Block 0x64, offset 0x1900 - 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, - 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, - 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, - 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, - 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, - 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, - 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, - 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, - 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, - 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, - 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 
0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, // Block 0x65, offset 0x1940 - 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, - 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, - 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, - 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, - 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, - 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, - 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, - 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, - 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, - 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, - 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, // Block 0x66, offset 0x1980 - 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, - 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, - 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, - 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, - 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, - 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, - 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, - 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, - 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, - 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, - 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 
0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, // Block 0x67, offset 0x19c0 - 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, - 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, - 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, - 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, - 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, - 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, - 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, - 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, - 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 0x19f5: 0x0fa9, - 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, - 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, - 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, - 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, - 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, - 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, - 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, - 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 
0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, - 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, - 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, - 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, - 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, // Block 0x69, offset 0x1a40 - 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, - 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, - 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, - 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, - 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, - 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, - 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, - 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, - 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, - 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, - 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 
0x1a7e: 0x1159, 0x1a7f: 0x0ef9, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, - 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, - 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, - 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, - 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, - 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, - 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, - 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, - 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, - 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, - 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, - 0x1ac6: 0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, - 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, - 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, - 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, - 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, - 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, - 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, - 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, - 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, - 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 
0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, // Block 0x6c, offset 0x1b00 - 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, - 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, - 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, - 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, - 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, - 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, - 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, - 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, - 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, - 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, - 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, // Block 0x6d, offset 0x1b40 - 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, - 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, - 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, - 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, - 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, - 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, - 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 
0xbc01, - 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, - 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, - 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, - 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, // Block 0x6e, offset 0x1b80 - 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, - 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, - 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, - 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, - 0x1b98: 0x1f41, 0x1b99: 0x00c9, 0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, - 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, - 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, - 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, - 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, - 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, - 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, // Block 0x6f, 
offset 0x1bc0 - 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, - 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, - 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, - 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, - 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, - 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, - 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, - 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, - 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, - 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, - 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, // Block 0x70, offset 0x1c00 - 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, - 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, - 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, - 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, - 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, - 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, - 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, - 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, - 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, - 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, - 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 
0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, // Block 0x71, offset 0x1c40 - 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, - 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, - 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, - 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, - 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, - 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, - 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, - 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, - 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, - 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, - 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, - 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 
0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, - 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, - 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, - 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, - 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, - 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, - 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, - 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, - 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, - 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, - 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, - 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, - 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, // Block 0x74, offset 0x1d00 - 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, - 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, - 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, - 0x1d12: 
0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, - 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, - 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, - 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, - 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, - 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, - 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, - 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, // Block 0x75, offset 0x1d40 - 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, - 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, - 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, - 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, - 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, - 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, - 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, - 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, - 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, - 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 
0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, // Block 0x76, offset 0x1d80 - 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, - 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, - 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, - 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, - 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, - 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, - 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, - 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, - 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, - 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, - 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, - 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, - 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, - 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, - 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, - 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, - 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, - 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, - 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, - 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, - 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, 
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, // Block 0x78, offset 0x1e00 - 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, - 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, - 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, - 0x1e12: 0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, - 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, - 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, - 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, - 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, - 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, - 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, - 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, // Block 0x79, offset 0x1e40 - 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, - 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, - 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, - 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 
0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, - 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, - 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, - 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, - 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, - 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, - 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, - 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, // Block 0x7a, offset 0x1e80 - 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, - 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, - 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, - 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, - 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, - 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, - 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, - 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, - 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, - 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, - 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 
0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, - 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, - 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, - 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, - 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, - 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, - 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, - 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, - 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, - 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, - 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, // Block 0x7c, offset 0x1f00 - 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, - 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, - 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, - 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, - 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, - 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, - 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, - 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, - 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, - 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, - 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 
0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, } -// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes // Block 0 is the zero block. 
-var idnaIndex = [2240]uint16{ +var idnaIndex = [2304]uint16{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, - 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, // Block 0x4, offset 0x100 - 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, - 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, - 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, - 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, - 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, - 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, - 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, - 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 0x176: 0x25, 0x177: 0xc1, - 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 
0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, // Block 0x6, offset 0x180 - 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, - 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, - 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, - 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, - 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, // Block 0x7, offset 0x1c0 - 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, - 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, - 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, - 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, - 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, - 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, - 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, - 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, // Block 0x8, offset 0x200 - 0x200: 0x9d, 0x201: 0x9d, 
0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, - 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, - 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, - 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, - 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, - 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, - 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, - 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, // Block 0x9, offset 0x240 - 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, - 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, - 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, - 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, - 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, - 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, - 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, - 0x278: 0x9d, 0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, // Block 0xa, offset 0x280 - 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, - 0x288: 0x9d, 0x289: 0x9d, 
0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, - 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, - 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, - 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, - 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, - 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, - 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, // Block 0xb, offset 0x2c0 - 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, - 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, - 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, - 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, - 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, - 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, - 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, - 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, // Block 0xc, offset 0x300 - 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, - 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, - 0x310: 0x9d, 0x311: 0x9d, 
0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d,
	// [regenerated idnaIndex rows for Blocks 0xc-0x23 elided: the filler entries
	// 0x9d and 0xb8 become 0x9f and 0xba, scalar block references shift by two
	// (e.g. 0x45 -> 0x47, 0x102 -> 0x103), and a new Block 0x23 at offset 0x8c0
	// is appended]
 }
 
-// idnaSparseOffset: 256 entries, 512 bytes
+// idnaSparseOffset: 264 entries, 528 bytes
	// [offset array literals elided: eight entries added and offsets renumbered to
	// match the regenerated sparse blocks]
 
-// idnaSparseValues: 1876 entries, 7504 bytes
-var idnaSparseValues = [1876]valueRange{
+// idnaSparseValues: 1915 entries, 7660 bytes
+var idnaSparseValues = [1915]valueRange{
 	// Block 0x0, offset 0x0
 	{value: 0x0000, lo: 0x07},
 	{value: 0xe105, lo: 0x80, hi: 0x96},
	// [regenerated sparse blocks through 0x38 elided: category values renumbered
	// (0x1308 -> 0x3308, 0x1008 -> 0x3008, 0x1b08 -> 0x3b08; 0x0008/0x0018/0x0208/
	// 0x0408 -> 0x0808/0x0818/0x0a08/0x0c08 in the affected blocks), with a few
	// blocks split, merged, or renumbered]
-	// Block 0x39, offset 0x20e
+	// Block 0x39, offset 0x1f6
 	{value: 0x0000, lo:
0x06}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2931,33 +2940,33 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0028, lo: 0x9a, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x215 + // Block 0x3a, offset 0x1fd {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x1308, lo: 0x97, hi: 0x98}, - {value: 0x1008, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9b}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x21d + // Block 0x3b, offset 0x205 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x1008, lo: 0x95, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x96}, - {value: 0x1008, lo: 0x97, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x9e}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1b08, lo: 0xa0, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xac}, - {value: 0x1008, lo: 0xad, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xbc}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x22d + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2967,78 +2976,78 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xbd}, - {value: 0x1318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x239 + // Block 0x3d, offset 0x221 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x23b + // Block 0x3e, offset 0x223 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x83}, - {value: 0x1008, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x245 + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d {value: 0x0000, lo: 0x0b}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, - {value: 0x1808, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 
0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x251 + // Block 0x40, offset 0x239 {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1808, lo: 0xaa, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x25d + // Block 0x41, offset 0x245 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1008, lo: 0xaa, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1808, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x42, offset 0x269 + // Block 0x42, offset 0x251 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x1008, lo: 0xa4, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x271 + // Block 0x43, offset 0x259 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x276 + // Block 0x44, offset 0x25e {value: 0x0000, lo: 0x09}, {value: 0x0e29, lo: 0x80, hi: 0x80}, {value: 0x0e41, lo: 0x81, hi: 0x81}, @@ -3049,30 +3058,30 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0eb9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x45, offset 0x280 + // Block 0x45, offset 0x268 {value: 0x0000, lo: 0x10}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x92}, + {value: 0x3308, lo: 0x90, hi: 0x92}, {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x1308, lo: 0x94, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 
0xa2, hi: 0xa8}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x3308, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xb9}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x46, offset 0x291 + // Block 0x46, offset 0x279 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x295 + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x87}, {value: 0xe045, lo: 0x88, hi: 0x8f}, @@ -3084,12 +3093,12 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe045, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x2a0 + // Block 0x48, offset 0x288 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x1318, lo: 0x90, hi: 0xb0}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x2a4 + // Block 0x49, offset 0x28c {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x83}, @@ -3099,7 +3108,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, offset 0x2ad + // Block 0x4a, offset 0x295 {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x24f1, lo: 0xac, hi: 0xac}, @@ -3108,72 +3117,68 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x2579, lo: 0xaf, hi: 0xaf}, {value: 0x25b1, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x2b5 + // Block 0x4b, offset 0x29d {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x9f}, {value: 0x0080, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xad}, {value: 0x0080, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2bb + // Block 0x4c, offset 0x2a3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xa8}, {value: 0x09c5, lo: 0xa9, hi: 0xa9}, {value: 0x09e5, lo: 0xaa, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2c0 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x4e, offset 0x2c3 + // Block 0x4d, offset 0x2a8 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4f, offset 0x2c6 + // Block 0x4e, offset 0x2ab {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x28c1, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x50, offset 0x2ca + // Block 0x4f, offset 0x2af {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e66, lo: 0xb4, hi: 0xb4}, {value: 0x292a, lo: 0xb5, hi: 0xb5}, {value: 0x0e86, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x51, offset 0x2d0 + // 
Block 0x50, offset 0x2b5 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, {value: 0x2941, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x52, offset 0x2d4 + // Block 0x51, offset 0x2b9 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x53, offset 0x2d8 + // Block 0x52, offset 0x2bd {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0x54, offset 0x2de + // Block 0x53, offset 0x2c3 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, {value: 0x0018, lo: 0xac, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2e5 + // Block 0x54, offset 0x2ca {value: 0x0000, lo: 0x05}, {value: 0xe185, lo: 0x80, hi: 0x8f}, {value: 0x03f5, lo: 0x90, hi: 0x9f}, {value: 0x0ea5, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2eb + // Block 0x55, offset 0x2d0 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xa6}, @@ -3182,15 +3187,15 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x57, offset 0x2f3 + // Block 0x56, offset 0x2d8 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xae}, {value: 0xe075, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0x58, offset 0x2fa + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, @@ -3202,7 +3207,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x59, offset 0x305 + // Block 0x58, offset 0x2ea {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3212,62 +3217,62 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xbf}, - // Block 0x5a, offset 0x30f + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5b, offset 0x313 + // Block 0x5a, offset 0x2f8 {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0xbf}, - // Block 0x5c, offset 0x316 + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9e}, {value: 0x0edd, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5d, offset 0x31c + // Block 0x5c, offset 0x301 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 
0x80, hi: 0xb2}, {value: 0x0efd, lo: 0xb3, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5e, offset 0x320 + // Block 0x5d, offset 0x305 {value: 0x0020, lo: 0x01}, {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x5f, offset 0x322 + // Block 0x5e, offset 0x307 {value: 0x0020, lo: 0x02}, {value: 0x171d, lo: 0x80, hi: 0x8f}, {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x60, offset 0x325 + // Block 0x5f, offset 0x30a {value: 0x0020, lo: 0x01}, {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x61, offset 0x327 + // Block 0x60, offset 0x30c {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x62, offset 0x32a + // Block 0x61, offset 0x30f {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x1308, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, {value: 0x29e2, lo: 0x9b, hi: 0x9b}, {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, {value: 0x2a31, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x63, offset 0x334 + // Block 0x62, offset 0x319 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x64, offset 0x337 + // Block 0x63, offset 0x31c {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, @@ -3279,150 +3284,150 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x2afd, lo: 0xba, hi: 0xbb}, {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x65, offset 0x346 + // Block 0x64, offset 0x32b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x66, offset 0x34a + // Block 0x65, offset 0x32f {value: 0x0030, lo: 0x04}, {value: 0x2aa2, lo: 0x80, hi: 0x9d}, {value: 0x305a, lo: 0x9e, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x67, offset 0x34f + // Block 0x66, offset 0x334 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x68, offset 0x352 + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x69, offset 0x356 + // Block 0x68, offset 0x33b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x6a, offset 0x35b + // Block 0x69, offset 0x340 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6b, offset 0x360 + // Block 0x6a, offset 0x345 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6c, offset 0x366 + // Block 
0x6b, offset 0x34b {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xb7}, {value: 0x2009, lo: 0xb8, hi: 0xb8}, {value: 0x6e89, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6d, offset 0x36c + // Block 0x6c, offset 0x351 {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x1308, lo: 0x8b, hi: 0x8b}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6e, offset 0x37b + // Block 0x6d, offset 0x360 {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6f, offset 0x381 + // Block 0x6e, offset 0x366 {value: 0x0000, lo: 0x03}, - {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xbf}, - // Block 0x70, offset 0x385 + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a {value: 0x0000, lo: 0x0e}, - {value: 0x1008, lo: 0x80, hi: 0x83}, - {value: 0x1b08, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x85}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0008, lo: 0xbb, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x71, offset 0x394 + // Block 0x70, offset 0x379 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xad}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x72, offset 0x399 + // Block 0x71, offset 0x37e {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x91}, - {value: 0x1008, lo: 0x92, hi: 0x92}, - {value: 0x1808, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x73, offset 0x3a1 + // Block 0x72, offset 0x386 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb9}, - {value: 
0x1008, lo: 0xba, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x74, offset 0x3ab + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 {value: 0x0000, lo: 0x0a}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x75, offset 0x3b6 + // Block 0x74, offset 0x39b {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x76, offset 0x3be + // Block 0x75, offset 0x3a3 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8c}, - {value: 0x1008, lo: 0x8d, hi: 0x8d}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, @@ -3430,38 +3435,38 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x77, offset 0x3cf + // Block 0x76, offset 0x3b4 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb4}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb8}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbf}, - // Block 0x78, offset 0x3d8 + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x3308, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x9a}, {value: 0x0008, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x1008, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, 
{value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1b08, lo: 0xb6, hi: 0xb6}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x79, offset 0x3e8 + // Block 0x78, offset 0x3cd {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, @@ -3475,7 +3480,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3f5 + // Block 0x79, offset 0x3da {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, @@ -3486,54 +3491,54 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xaf}, {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3ff + // Block 0x7a, offset 0x3e4 {value: 0x0000, lo: 0x04}, {value: 0x44b5, lo: 0x80, hi: 0x8f}, {value: 0x44d5, lo: 0x90, hi: 0x9f}, {value: 0x44f5, lo: 0xa0, hi: 0xaf}, {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x404 + // Block 0x7b, offset 0x3e9 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xaa}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1b08, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7d, offset 0x411 + // Block 0x7c, offset 0x3f6 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7e, offset 0x415 + // Block 0x7d, offset 0x3fa {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7f, offset 0x41a + // Block 0x7e, offset 0x3ff {value: 0x0020, lo: 0x01}, {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x80, offset 0x41c + // Block 0x7f, offset 0x401 {value: 0x0020, lo: 0x03}, {value: 0x4d15, lo: 0x80, hi: 0x94}, {value: 0x4ad5, lo: 0x95, hi: 0x95}, {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x81, offset 0x420 + // Block 0x80, offset 0x405 {value: 0x0020, lo: 0x01}, {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x82, offset 0x422 + // Block 0x81, offset 0x407 {value: 0x0020, lo: 0x03}, {value: 0x5cf5, lo: 0x80, hi: 0x84}, {value: 0x5655, lo: 0x85, hi: 0x85}, {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x83, offset 0x426 + // Block 0x82, offset 0x40b {value: 0x0020, lo: 0x08}, {value: 0x6b55, lo: 0x80, hi: 0x8f}, {value: 0x6d15, lo: 0x90, hi: 0x90}, @@ -3543,19 +3548,19 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x84, offset 0x42f + // Block 0x83, offset 0x414 {value: 0x0020, lo: 0x05}, {value: 0x72d5, lo: 0x80, hi: 0xad}, {value: 0x6535, lo: 
0xae, hi: 0xae}, {value: 0x7895, lo: 0xaf, hi: 0xb5}, {value: 0x6f55, lo: 0xb6, hi: 0xb6}, {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x85, offset 0x435 + // Block 0x84, offset 0x41a {value: 0x0028, lo: 0x03}, {value: 0x7c21, lo: 0x80, hi: 0x82}, {value: 0x7be1, lo: 0x83, hi: 0x83}, {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x86, offset 0x439 + // Block 0x85, offset 0x41e {value: 0x0038, lo: 0x0f}, {value: 0x9db1, lo: 0x80, hi: 0x83}, {value: 0x9e59, lo: 0x84, hi: 0x85}, @@ -3572,7 +3577,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xa869, lo: 0xbc, hi: 0xbc}, {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x87, offset 0x449 + // Block 0x86, offset 0x42e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, @@ -3583,24 +3588,24 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x88, offset 0x453 + // Block 0x87, offset 0x438 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x89, offset 0x458 + // Block 0x88, offset 0x43d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x8a, offset 0x45b + // Block 0x89, offset 0x440 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8b, offset 0x461 + // Block 0x8a, offset 0x446 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, @@ -3608,31 +3613,31 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8c, offset 0x468 + // Block 0x8b, offset 0x44d {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8d, offset 0x46d + // Block 0x8c, offset 0x452 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8e, offset 0x471 + // Block 0x8d, offset 0x456 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8f, offset 0x477 + // Block 0x8e, offset 0x45c {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x90, offset 0x47c + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, @@ -3640,22 +3645,22 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 
0xbf}, - // Block 0x91, offset 0x485 + // Block 0x90, offset 0x46a {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x92, offset 0x48a + // Block 0x91, offset 0x46f {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x93, offset 0x490 + // Block 0x92, offset 0x475 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3663,7 +3668,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x8ad5, lo: 0x98, hi: 0x9f}, {value: 0x8aed, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x94, offset 0x497 + // Block 0x93, offset 0x47c {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, @@ -3671,7 +3676,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8aed, lo: 0xb0, hi: 0xb7}, {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x95, offset 0x49e + // Block 0x94, offset 0x483 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3679,173 +3684,176 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x96, offset 0x4a5 + // Block 0x95, offset 0x48a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x4a9 + // Block 0x96, offset 0x48e {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x98, offset 0x4ae + // Block 0x97, offset 0x493 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x99, offset 0x4b1 + // Block 0x98, offset 0x496 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x9a, offset 0x4b6 + // Block 0x99, offset 0x49b {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x85}, + {value: 0x0808, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0808, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xb5}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb8}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbc}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x9b, offset 0x4c2 + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x9c, offset 0x4c8 + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 
0xbf}, + // Block 0x9b, offset 0x4ad {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0018, lo: 0xa7, hi: 0xaf}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9d, offset 0x4cd + // Block 0x9c, offset 0x4b2 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x9e, offset 0x4d4 + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0018, lo: 0x96, hi: 0x9b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb9}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x9f, offset 0x4dc + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0xa0, offset 0x4e1 + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xbf}, - // Block 0xa1, offset 0x4e5 + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x83}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x3308, lo: 0x85, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x93}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0008, lo: 0x95, hi: 0x97}, + {value: 0x0808, lo: 0x95, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0xb3}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xba}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa2, offset 0x4f5 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0818, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x98}, + {value: 0x0818, lo: 0x90, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0xa3, offset 0x4fc + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0808, lo: 0x80, hi: 
0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa4, offset 0x500 + // Block 0xa3, offset 0x4e5 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa5, offset 0x504 + // Block 0xa4, offset 0x4e9 {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbf}, - // Block 0xa6, offset 0x50b + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0808, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x50e + // Block 0xa7, offset 0x4f5 {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x511 + // Block 0xa8, offset 0x4f8 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x515 + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbe}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x519 + // Block 0xaa, offset 0x500 {value: 0x0000, lo: 0x05}, - {value: 0x1008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x51f + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x91}, {value: 0x0018, lo: 0x92, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x528 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb6}, - {value: 0x1008, lo: 0xb7, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0340, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xad, offset 0x534 + // Block 0xad, offset 
0x51b {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, @@ -3853,39 +3861,39 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xae, offset 0x53b + // Block 0xae, offset 0x522 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb2}, - {value: 0x1b08, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xaf, offset 0x544 + // Block 0xaf, offset 0x52b {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x54c + // Block 0xb0, offset 0x533 {value: 0x0000, lo: 0x06}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xbe}, - {value: 0x1008, lo: 0xbf, hi: 0xbf}, - // Block 0xb1, offset 0x553 + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a {value: 0x0000, lo: 0x0d}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x1308, lo: 0x8a, hi: 0x8c}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9a}, @@ -3895,21 +3903,21 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb2, offset 0x561 + // Block 0xb2, offset 0x548 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1808, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x56e + // Block 0xb3, offset 0x555 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3923,28 +3931,28 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb4, offset 
0x57b + // Block 0xb4, offset 0x562 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x1308, lo: 0x9f, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa9}, - {value: 0x1b08, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb5, offset 0x584 + // Block 0xb5, offset 0x56b {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xb6, offset 0x588 + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f {value: 0x0000, lo: 0x0d}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x84}, - {value: 0x1008, lo: 0x85, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, @@ -3953,56 +3961,56 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb7, offset 0x596 + // Block 0xb7, offset 0x57d {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb8}, - {value: 0x1008, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0xb8, offset 0x59e + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x85}, {value: 0x0018, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xb9, offset 0x5a9 + // Block 0xb9, offset 0x590 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb5}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x5b2 + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 {value: 0x0000, lo: 0x05}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, 
lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x1308, lo: 0x9c, hi: 0x9d}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbb, offset 0x5b8 + // Block 0xbb, offset 0x59f {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5c0 + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, @@ -4010,60 +4018,97 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbd, offset 0x5c9 + // Block 0xbd, offset 0x5b0 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb5}, - {value: 0x1808, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xbe, offset 0x5d3 + // Block 0xbe, offset 0x5ba {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xbf, offset 0x5d6 + // Block 0xbf, offset 0x5bd {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc0, offset 0x5e2 + // Block 0xc0, offset 0x5c9 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc1, offset 0x5e5 + // Block 0xc1, offset 0x5cc {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5ea + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 
0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc3, offset 0x5ed + // Block 0xc6, offset 0x5f6 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc4, offset 0x5f7 + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, @@ -4073,42 +4118,65 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc5, offset 0x600 + // Block 0xc8, offset 0x609 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0xa7}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xa9}, - {value: 0x1308, lo: 0xaa, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc6, offset 0x60c + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + 
// Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xc7, offset 0x60f + // Block 0xcc, offset 0x62d {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xc8, offset 0x614 + // Block 0xcd, offset 0x632 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xc9, offset 0x617 + // Block 0xce, offset 0x635 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xca, offset 0x61a + // Block 0xcf, offset 0x638 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xcb, offset 0x61d + // Block 0xd0, offset 0x63b {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, @@ -4116,20 +4184,20 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xcc, offset 0x624 + // Block 0xd1, offset 0x642 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb4}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xcd, offset 0x62b + // Block 0xd2, offset 0x649 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xce, offset 0x62f + // Block 0xd3, offset 0x64d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, @@ -4141,67 +4209,75 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xcf, offset 0x63a + // Block 0xd4, offset 0x658 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd0, offset 0x63d + // Block 0xd5, offset 0x65b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x1008, lo: 0x91, hi: 0xbe}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x643 + // Block 0xd6, offset 0x661 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x1308, lo: 0x8f, hi: 0x92}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd2, offset 0x648 + // Block 0xd7, offset 0x666 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0xd3, offset 0x64c + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a {value: 0x0000, lo: 
0x02}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd4, offset 0x64f + // Block 0xd9, offset 0x66d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xd5, offset 0x652 + // Block 0xda, offset 0x670 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0xbf}, - // Block 0xd6, offset 0x655 + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xd7, offset 0x65a + // Block 0xde, offset 0x67e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9e}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xd8, offset 0x664 + // Block 0xdf, offset 0x688 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd9, offset 0x667 + // Block 0xe0, offset 0x68b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xda, offset 0x66b + // Block 0xe1, offset 0x68f {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, @@ -4211,127 +4287,127 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xb719, lo: 0xa2, hi: 0xa2}, {value: 0xb781, lo: 0xa3, hi: 0xa3}, {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x1018, lo: 0xa5, hi: 0xa6}, - {value: 0x1318, lo: 0xa7, hi: 0xa9}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x1018, lo: 0xad, hi: 0xb2}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x1318, lo: 0xbb, hi: 0xbf}, - // Block 0xdb, offset 0x67a + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e {value: 0x0000, lo: 0x0b}, - {value: 0x1318, lo: 0x80, hi: 0x82}, + {value: 0x3318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x1318, lo: 0x85, hi: 0x8b}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x1318, lo: 0xaa, hi: 0xad}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, {value: 0xb851, lo: 0xbb, hi: 0xbb}, {value: 0xb899, lo: 0xbc, hi: 0xbc}, {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, {value: 0xb949, lo: 0xbe, hi: 0xbe}, {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xdc, offset 0x686 + // Block 0xe3, offset 0x6aa {value: 0x0000, lo: 0x03}, {value: 0xba19, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xdd, offset 0x68a + // Block 0xe4, offset 0x6ae {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x1318, lo: 0x82, hi: 0x84}, + {value: 0x3318, lo: 0x82, hi: 
0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xde, offset 0x68f + // Block 0xe5, offset 0x6b3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xdf, offset 0x694 + // Block 0xe6, offset 0x6b8 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb6}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0xe0, offset 0x698 + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc {value: 0x0000, lo: 0x04}, - {value: 0x1308, lo: 0x80, hi: 0xac}, + {value: 0x3308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe1, offset 0x69d + // Block 0xe8, offset 0x6c1 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9f}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x1308, lo: 0xa1, hi: 0xaf}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe2, offset 0x6a6 + // Block 0xe9, offset 0x6ca {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x98}, + {value: 0x3308, lo: 0x88, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa1}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xaa}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xe3, offset 0x6b1 + // Block 0xea, offset 0x6d5 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x96}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xe4, offset 0x6b7 + // Block 0xeb, offset 0x6db {value: 0x0000, lo: 0x07}, - {value: 0x0208, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x8a}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0808, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe5, offset 0x6bf + // Block 0xec, offset 0x6e3 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe6, offset 0x6c3 + // Block 0xed, offset 0x6e7 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xe7, offset 0x6c7 + // Block 0xee, offset 0x6eb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 
0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xe8, offset 0x6cd + // Block 0xef, offset 0x6f1 {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe9, offset 0x6d3 + // Block 0xf0, offset 0x6f7 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, {value: 0xc1c1, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xea, offset 0x6d8 + // Block 0xf1, offset 0x6fc {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xeb, offset 0x6db - {value: 0x0000, lo: 0x0d}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, {value: 0xc7e9, lo: 0x80, hi: 0x80}, {value: 0xc839, lo: 0x81, hi: 0x81}, {value: 0xc889, lo: 0x82, hi: 0x82}, @@ -4344,84 +4420,88 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0xcab9, lo: 0x90, hi: 0x90}, {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xec, offset 0x6e9 + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xed, offset 0x6f0 + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xee, offset 0x6f3 + // Block 0xf5, offset 0x719 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xef, offset 0x6f6 + // Block 0xf6, offset 0x71c {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf0, offset 0x6fa + // Block 0xf7, offset 0x720 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf1, offset 0x700 + // Block 0xf8, offset 0x726 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf2, offset 0x705 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xf3, offset 0x70f + // Block 0xf9, offset 0x72b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf4, offset 0x714 - {value: 0x0000, lo: 0x02}, - {value: 
0x0018, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xf5, offset 0x717 + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0xbf}, - // Block 0xf6, offset 0x71a + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf7, offset 0x71d + // Block 0xfe, offset 0x740 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xf8, offset 0x720 + // Block 0xff, offset 0x743 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xf9, offset 0x724 - {value: 0x0000, lo: 0x02}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xfa, offset 0x727 + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e {value: 0x0020, lo: 0x0f}, {value: 0xdeb9, lo: 0x80, hi: 0x89}, {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, @@ -4438,7 +4518,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe4f9, lo: 0xba, hi: 0xba}, {value: 0x8edd, lo: 0xbb, hi: 0xbb}, {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0xfb, offset 0x737 + // Block 0x103, offset 0x75e {value: 0x0020, lo: 0x10}, {value: 0x937d, lo: 0x80, hi: 0x80}, {value: 0xf099, lo: 0x81, hi: 0x86}, @@ -4455,23 +4535,23 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xf4d9, lo: 0xae, hi: 0xaf}, {value: 0x94dd, lo: 0xb0, hi: 0xb1}, {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xfc, offset 0x748 + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0xfd, offset 0x74d + // Block 0x105, offset 0x774 {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0xfe, offset 0x74f + // Block 0x106, offset 0x776 {value: 0x0000, lo: 0x01}, - {value: 0x13c0, lo: 0x80, hi: 0xbf}, - // Block 0xff, offset 0x751 + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 {value: 0x0000, lo: 0x02}, - {value: 0x13c0, lo: 0x80, hi: 0xaf}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 63cb03b59..7a8cf889b 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -26,9 +26,9 @@ 
package idna // 15..3 index into xor or mapping table // } // } else { -// 15..13 unused -// 12 modifier (including virama) -// 11 virama modifier +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes // 10..8 joining type // 7..3 category type // } @@ -49,15 +49,20 @@ const ( joinShift = 8 joinMask = 0x07 - viramaModifier = 0x0800 + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 ) // A category corresponds to a category defined in the IDNA mapping table. type category uint16 const ( - unknown category = 0 // not defined currently in unicode. + unknown category = 0 // not currently defined in unicode. mapped category = 1 disallowedSTD3Mapped category = 2 deviation category = 3 @@ -110,5 +115,5 @@ func (c info) isModifier() bool { } func (c info) isViramaModifier() bool { - return c&(viramaModifier|catSmallMask) == viramaModifier + return c&(attributesMask|catSmallMask) == viramaModifier } diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..685f0e7ea --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
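+// (Here the bucket width is the size field, a time.Duration, so the start
+// of a level works out to end.Add(-size * time.Duration(len(buckets)));
+// for example, 64 one-second buckets ending at 12:01:04 start at 12:00:00.)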
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
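+		// The pending bucket absorbs such slightly-late observations; they
+		// are folded into the levels by the next mergePendingUpdates call.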
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
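+//
+// A usage sketch (illustrative values, not part of the original file;
+// t is any time.Time):
+//
+//	ts := NewTimeSeries(NewFloat)
+//	f := Float(1)
+//	ts.AddWithTime(&f, t)                   // record the value 1 at time t
+//	sum := ts.Range(t.Add(-time.Minute), t) // aggregate the minute before t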
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
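+					// For example, if src covers [0s,10s) and dst covers
+					// [5s,20s), the overlap is [5s,10s), so fraction =
+					// 5s/10s = 0.5 and half of src's value is credited
+					// to this dst bucket.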
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. 
+// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
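+//
+// A minimal usage sketch (illustrative names, not part of the original file):
+//
+//	el := trace.NewEventLog("mypkg.Conn", "unix:12345")
+//	el.Printf("handshake complete")
+//	el.Errorf("read failed: %v", err)
+//	el.Finish()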
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
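+	// reset is only reached from freeEventLog, after the last reference has
+	// been dropped, so the fields can be zeroed without holding el.mu.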
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
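+// (The buffered channel acts as a free list: the select's default case
+// simply drops el when the list is full rather than blocking.)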
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..f00d869ff --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: "/debug/requests"}}) + if pat != "" { + panic("/debug/requests is already registered. 
You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
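+	// Build the count map first so that only the reads of activeTraces
+	// below happen while holding activeMu.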
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. 
See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
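+// A minimal caller sketch (this mirrors what Render does with a bucket):
+//
+//	trl := b.Copy(false) // takes a ref on each trace in the bucket
+//	defer trl.Free()     // drop those refs when done with the traces
+//	sort.Sort(trl)       // newest first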
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
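+			// The goroutine below receives its own copies of the recycler
+			// and the events slice, so it can keep running safely even
+			// after freeTrace resets tr.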
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 000000000..d60819118 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 000000000..df6e1fba7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..50d918b87 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. +var appengineFlex bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) 
+ if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 000000000..56669eaa9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go new file mode 100644 index 000000000..5d0231af2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +func init() { + appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..a31607437 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// Common implementation for FindDefaultCredentials. +func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. 
+ filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineFlex { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// Common implementation for CredentialsFromJSON. +func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc_go19.go new file mode 100644 index 000000000..2a86325fe --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_go19.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. 
+// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful in some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go new file mode 100644 index 000000000..5c3c6e148 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_not_go19.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The DefaultCredentials type represents Google Application Default Credentials, as +// well as other forms of credential. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON +// formats described in OAuth2 Configs, above. (The DefaultCredentials returned may +// not be "Application Default Credentials".) The TokenSource in the returned value +// is the same as the one obtained from the oauth2.Config returned from +// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain +// additional information that is useful in some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go new file mode 100644 index 000000000..4d0318b1e --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/go19.go @@ -0,0 +1,57 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..f7481fbcc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,192 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
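+//
+// A minimal sketch of the intended flow (the file name and scope here are
+// illustrative, not prescribed by the API):
+//
+//	b, err := ioutil.ReadFile("client_credentials.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/devstorage.read_only")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	authURL := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)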
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b0fdb3a88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
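+//
+// A minimal sketch (the file name and audience are illustrative):
+//
+//	jsonKey, err := ioutil.ReadFile("service-account.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ts, err := google.JWTAccessTokenSourceFromJSON(jsonKey, "https://example.googleapis.com/")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tok, err := ts.Token() // a locally signed JWT; no round trip to an OAuth2 server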
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go new file mode 100644 index 000000000..544e40624 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/not_go19.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). 
+// +// Note: despite the name, the returned credentials may not be Application Default Credentials. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..b9660cadd --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
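+//
+// A minimal sketch (an empty account name selects the account currently
+// active in gcloud):
+//
+//	conf, err := google.NewSDKConfig("")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := conf.Client(context.Background())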
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieves tokens from +// Google Cloud SDK credentials using the provided context. +// It will return the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. 
+func (c *SDKConfig) Scopes() []string {
+ return c.conf.Scopes
+}
+
+func parseINI(ini io.Reader) (map[string]map[string]string, error) {
+ result := map[string]map[string]string{
+ "": {}, // root section
+ }
+ scanner := bufio.NewScanner(ini)
+ currentSection := ""
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if strings.HasPrefix(line, ";") {
+ // comment.
+ continue
+ }
+ if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+ currentSection = strings.TrimSpace(line[1 : len(line)-1])
+ result[currentSection] = map[string]string{}
+ continue
+ }
+ parts := strings.SplitN(line, "=", 2)
+ if len(parts) == 2 && parts[0] != "" {
+ result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning ini: %v", err)
+ }
+ return result, nil
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+ }
+ homeDir := guessUnixHomeDir()
+ if homeDir == "" {
+ return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+ }
+ return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 000000000..743487188
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+ appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 000000000..03265e888
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 000000000..fc63fcab3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
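For illustration only (not part of the patch): a self-contained sketch of the two input encodings the ParseKey helper documented above accepts. ParseKey itself lives in an internal package and cannot be imported, so its PEM-detection logic is mirrored inline here.

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

func main() {
    // Generate a throwaway key, then exercise the two encodings:
    // PEM-wrapped, or raw DER bytes (PKCS#1 here).
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        panic(err)
    }
    der := x509.MarshalPKCS1PrivateKey(key) // raw DER bytes
    pemBytes := pem.EncodeToMemory(&pem.Block{
        Type:  "RSA PRIVATE KEY",
        Bytes: der,
    })

    // Mirror ParseKey's detection: unwrap PEM if present, else use raw.
    for _, in := range [][]byte{pemBytes, der} {
        b := in
        if block, _ := pem.Decode(b); block != nil {
            b = block.Bytes
        }
        if _, err := x509.ParsePKCS1PrivateKey(b); err != nil {
            panic(err)
        }
        fmt.Println("parsed", len(in), "input bytes")
    }
}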
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("private key is invalid")
+ }
+ return parsed, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 000000000..711bc9366
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,273 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time
+
+ // Raw optionally contains extra metadata from the server
+ // when updating a token.
+ Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
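For illustration only (not part of the patch): a fabricated token-endpoint response of the kind the tokenJSON struct below models. json.Number is used in the mirror struct because, as the field comments below note, some providers return expires_in as a string rather than a number.

package main

import (
    "encoding/json"
    "fmt"
)

// Fabricated response body; all values are placeholders.
const resp = `{
  "access_token": "ya29.placeholder",
  "token_type": "Bearer",
  "refresh_token": "1/placeholder",
  "expires_in": 3600
}`

func main() {
    var tok struct {
        AccessToken string      `json:"access_token"`
        TokenType   string      `json:"token_type"`
        ExpiresIn   json.Number `json:"expires_in"` // accepts a JSON number or numeric string
    }
    if err := json.Unmarshal([]byte(resp), &tok); err != nil {
        panic(err)
    }
    secs, _ := tok.ExpiresIn.Int64()
    fmt.Println(tok.TokenType, "token; expires in", secs, "s")
}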
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+ Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ var n json.Number
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+ i, err := n.Int64()
+ if err != nil {
+ return err
+ }
+ *e = expirationTime(i)
+ return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+ "https://accounts.google.com/",
+ "https://api.codeswholesale.com/oauth/token",
+ "https://api.dropbox.com/",
+ "https://api.dropboxapi.com/",
+ "https://api.instagram.com/",
+ "https://api.netatmo.net/",
+ "https://api.odnoklassniki.ru/",
+ "https://api.pushbullet.com/",
+ "https://api.soundcloud.com/",
+ "https://api.twitch.tv/",
+ "https://id.twitch.tv/",
+ "https://app.box.com/",
+ "https://api.box.com/",
+ "https://connect.stripe.com/",
+ "https://login.mailchimp.com/",
+ "https://login.microsoftonline.com/",
+ "https://login.salesforce.com/",
+ "https://login.windows.net",
+ "https://login.live.com/",
+ "https://login.live-int.com/",
+ "https://oauth.sandbox.trainingpeaks.com/",
+ "https://oauth.trainingpeaks.com/",
+ "https://oauth.vk.com/",
+ "https://openapi.baidu.com/",
+ "https://slack.com/",
+ "https://test-sandbox.auth.corp.google.com",
+ "https://test.salesforce.com/",
+ "https://user.gini.net/",
+ "https://www.douban.com/",
+ "https://www.googleapis.com/",
+ "https://www.linkedin.com/",
+ "https://www.strava.com/oauth/",
+ "https://www.wunderlist.com/oauth/",
+ "https://api.patreon.com/",
+ "https://sandbox.codeswholesale.com/oauth/token",
+ "https://api.sipgate.com/v1/authorization/oauth",
+ "https://api.medium.com/v1/tokens",
+ "https://log.finalsurge.com/oauth/token",
+ "https://multisport.todaysplan.com.au/rest/oauth/access_token",
+ "https://whats.todaysplan.com.au/rest/oauth/access_token",
+ "https://stackoverflow.com/oauth/access_token",
+ "https://account.health.nokia.com",
+}
+
+// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
+var brokenAuthHeaderDomains = []string{
+ ".auth0.com",
+ ".force.com",
+ ".myshopify.com",
+ ".okta.com",
+ ".oktapreview.com",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+ for _, s := range brokenAuthHeaderProviders {
+ if strings.HasPrefix(tokenURL, s) {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + if token.AccessToken == "" { + return token, errors.New("oauth2: server response missing access_token") + } + return token, nil +} + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..d16f9ae1f --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,34 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package internal
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+var appengineClientHook func(context.Context) *http.Client
+
+func ContextClient(ctx context.Context) *http.Client {
+ if ctx != nil {
+ if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+ return hc
+ }
+ }
+ if appengineClientHook != nil {
+ return appengineClientHook(ctx)
+ }
+ return http.DefaultClient
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 000000000..683d2d271
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
+ Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ // Reverting time back for machines whose time is not perfectly in sync.
+ // If client machine's time is in the future according
+ // to Google servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." 
+ parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..e08f31595 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. 
Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..16775d081 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,362 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
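For illustration only (not part of the patch): registering a hypothetical non-conforming provider before any token exchange. After this call, client_id and client_secret are sent as form values instead of an HTTP Basic Authorization header.

package main

import "golang.org/x/oauth2"

func init() {
    // Placeholder token URL for a provider that rejects Basic auth.
    oauth2.RegisterBrokenAuthHeaderProvider("https://auth.example.com/oauth/token")
}

func main() {}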
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+ internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users going through
+ // the OAuth flow, after the resource owner's URLs.
+ RedirectURL string
+
+ // Scopes specifies optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Config.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online is the default if neither is specified. If your
+ // application needs to refresh access tokens when the user
+ // is not present at the browser, then use offline. This will
+ // result in your application obtaining a refresh token the
+ // first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
+ AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+ return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
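For illustration only (not part of the patch): a minimal sketch of the 3-legged flow described above. The endpoints, client credentials, state value, and authorization code are all fabricated placeholders.

package main

import (
    "context"
    "fmt"

    "golang.org/x/oauth2"
)

func main() {
    conf := &oauth2.Config{
        ClientID:     "client-id",
        ClientSecret: "client-secret",
        RedirectURL:  "https://example.com/callback",
        Scopes:       []string{"profile"},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://auth.example.com/authorize",
            TokenURL: "https://auth.example.com/token",
        },
    }

    // Direct the user to the consent page; "state" must be an
    // unguessable value checked again on the redirect callback.
    url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
    fmt.Println("Visit:", url)

    // After the redirect delivers ?code=..., exchange it for a token.
    tok, err := conf.Exchange(context.Background(), "authorization-code")
    if err != nil {
        // The fabricated endpoints above will fail here in practice.
        fmt.Println("exchange:", err)
        return
    }
    fmt.Println("token type:", tok.TokenType)
}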
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + if state != "" { + // TODO(light): Docs say never to omit state; don't allow empty. + v.Set("state", state) + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + v := url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +// +// Opts may include the PKCE verifier code if previously used in AuthCodeURL. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveToken(ctx, c, v) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. 
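For illustration only (not part of the patch): a sketch of the intended split between Config.TokenSource, which wraps a previously persisted token and refreshes it on demand via the tokenRefresher below, and NewClient; all values are placeholders.

package main

import (
    "context"
    "net/http"

    "golang.org/x/oauth2"
)

// newAPIClient rebuilds an authorized client from a token saved earlier.
func newAPIClient(ctx context.Context, conf *oauth2.Config, saved *oauth2.Token) *http.Client {
    ts := conf.TokenSource(ctx, saved) // auto-refreshing source
    return oauth2.NewClient(ctx, ts)
}

func main() {
    conf := &oauth2.Config{ /* endpoint and client fields elided */ }
    saved := &oauth2.Token{RefreshToken: "1/placeholder"}
    _ = newAPIClient(context.Background(), conf, saved)
}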
+type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + return internal.ContextClient(ctx) + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextClient(ctx).Transport, + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. 
This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 000000000..34db8cdc8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+ // AccessToken is the token that authorizes and authenticates
+ // the requests.
+ AccessToken string `json:"access_token"`
+
+ // TokenType is the type of token.
+ // The Type method returns either this or "Bearer", the default.
+ TokenType string `json:"token_type,omitempty"`
+
+ // RefreshToken is a token that's used by the application
+ // (as opposed to the user) to refresh the access token
+ // if it expires.
+ RefreshToken string `json:"refresh_token,omitempty"`
+
+ // Expiry is the optional expiration time of the access token.
+ //
+ // If zero, TokenSource implementations will reuse the same
+ // token forever and RefreshToken or equivalent
+ // mechanisms for that TokenSource will not be used.
+ Expiry time.Time `json:"expiry,omitempty"`
+
+ // raw optionally contains extra metadata from the server
+ // when updating a token.
+ raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if strings.EqualFold(t.TokenType, "bearer") {
+ return "Bearer"
+ }
+ if strings.EqualFold(t.TokenType, "mac") {
+ return "MAC"
+ }
+ if strings.EqualFold(t.TokenType, "basic") {
+ return "Basic"
+ }
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+
+ vals, ok := t.raw.(url.Values)
+ if !ok {
+ return nil
+ }
+
+ v := vals.Get(key)
+ switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+ case 0: // Contains no "."; try to parse as int
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i
+ }
+ case 1: // Contains a single "."; try to parse as float
+ if f, err := strconv.ParseFloat(s, 64); err == nil {
+ return f
+ }
+ }
+
+ return v
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+ if t == nil {
+ return nil
+ }
+ return &Token{
+ AccessToken: t.AccessToken,
+ TokenType: t.TokenType,
+ RefreshToken: t.RefreshToken,
+ Expiry: t.Expiry,
+ raw: t.Raw,
+ }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*RetrieveError)(rErr)
+ }
+ return nil, err
+ }
+ return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+ Response *http.Response
+ // Body is the body that was consumed by reading Response.Body.
+ // It may be truncated.
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 000000000..aa0d34f1e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
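For illustration only (not part of the patch): wiring Transport by hand with a fixed token; most callers should prefer Config.Client. The token value and URL are placeholders.

package main

import (
    "net/http"

    "golang.org/x/oauth2"
)

func main() {
    ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ya29.placeholder"})
    client := &http.Client{
        // RoundTrip below adds "Authorization: Bearer ya29.placeholder".
        Transport: &oauth2.Transport{Source: ts},
    }
    _, _ = client.Get("https://api.example.com/resource")
}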
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + + // req.Body is assumed to have been closed by the base RoundTripper. + reqBodyClosed = true + + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..e9d2d79a9 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "sync" + + // Use the old context because packages that depend on this one + // (e.g. cloud.google.com/go/...) must run on Go 1.6. + // TODO(jba): update to "context" when possible. + "golang.org/x/net/context" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. 
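For illustration only (not part of the patch): bounding concurrency with the weighted semaphore; the limit of 3 and the worker count are arbitrary.

package main

import (
    "fmt"
    "sync"

    "golang.org/x/net/context"
    "golang.org/x/sync/semaphore"
)

func main() {
    sem := semaphore.NewWeighted(3) // at most 3 units held at once
    ctx := context.Background()
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            if err := sem.Acquire(ctx, 1); err != nil {
                return // only fails if ctx is canceled
            }
            defer sem.Release(1)
            fmt.Println("worker", id, "holding a slot")
        }(i)
    }
    wg.Wait()
}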
+type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking only until ctx +// is done. On success, returns nil. On failure, returns ctx.Err() and leaves +// the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: bad release") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 000000000..f73b72574 --- /dev/null +++ b/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,10 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 000000000..fe55ebff0 --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,55 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Kunpei Sakai +Matthew Whisenhunt +Michael McGreevy +Nick Craig-Wood +Robbie Trencheny +Ross Light +Sarah Adams +Scott Van Woudenberg +Takashi Matsuo diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 000000000..263aa7a0c --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE new file mode 100644 index 000000000..de9c88cb6 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 000000000..eca1ea250 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,38 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 000000000..2056c6d37 --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,45 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
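Stepping back to the transport.APIKey RoundTripper added above: for illustration only (not part of the patch), with a placeholder key and URL.

package main

import (
    "net/http"

    "google.golang.org/api/googleapi/transport"
)

func main() {
    // Every request through this client gets ?key=... appended.
    client := &http.Client{
        Transport: &transport.APIKey{Key: "AIza-placeholder"},
    }
    _, _ = client.Get("https://www.googleapis.com/example/v1/items")
}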
+ +package internal + +import ( + "fmt" + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.DefaultCredentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return google.CredentialsFromJSON(ctx, ds.CredentialsJSON, ds.Scopes...) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return google.CredentialsFromJSON(ctx, data, ds.Scopes...) + } + if ds.TokenSource != nil { + return &google.DefaultCredentials{TokenSource: ds.TokenSource}, nil + } + return google.FindDefaultCredentials(ctx, ds.Scopes...) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go new file mode 100644 index 000000000..3bb2c1a2d --- /dev/null +++ b/vendor/google.golang.org/api/internal/pool.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "google.golang.org/grpc/naming" +) + +// PoolResolver provides a fixed list of addresses to load balance between +// and does not provide further updates. +type PoolResolver struct { + poolSize int + dialOpt *DialSettings + ch chan []*naming.Update +} + +// NewPoolResolver returns a PoolResolver +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func NewPoolResolver(size int, o *DialSettings) *PoolResolver { + return &PoolResolver{poolSize: size, dialOpt: o} +} + +// Resolve returns a Watcher for the endpoint defined by the DialSettings +// provided to NewPoolResolver. +func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { + if r.dialOpt.Endpoint == "" { + return nil, errors.New("No endpoint configured") + } + addrs := make([]*naming.Update, 0, r.poolSize) + for i := 0; i < r.poolSize; i++ { + addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) + } + r.ch = make(chan []*naming.Update, 1) + r.ch <- addrs + return r, nil +} + +// Next returns a static list of updates on the first call, +// and blocks indefinitely until Close is called on subsequent calls. +func (r *PoolResolver) Next() ([]*naming.Update, error) { + return <-r.ch, nil +} + +func (r *PoolResolver) Close() { + close(r.ch) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 000000000..afabdc423 --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,81 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal supports the options and transport packages. +package internal + +import ( + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.DefaultCredentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + NoAuth bool +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. + if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + + return nil +} diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go new file mode 100644 index 000000000..3c8ea7732 --- /dev/null +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -0,0 +1,231 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iterator provides support for standard Google API iterators. +// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. 
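To make the Validate rules in settings.go above concrete, a sketch as if written inside package internal (which cannot be imported from outside this module); fmt and golang.org/x/oauth2 are assumed imported, and the key and filename are placeholders:

    func validateSketch() {
        // No-auth combined with any credential option is rejected.
        ds := &DialSettings{
            NoAuth: true,
            APIKey: "PLACEHOLDER_KEY",
        }
        fmt.Println(ds.Validate()) // non-nil error

        // TokenSource plus CredentialsFile alone is still accepted,
        // for backwards compatibility.
        ds = &DialSettings{
            TokenSource:     oauth2.StaticTokenSource(&oauth2.Token{}),
            CredentialsFile: "placeholder.json",
        }
        fmt.Println(ds.Validate()) // nil
    }
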
+package iterator + +import ( + "errors" + "fmt" + "reflect" +) + +// Done is returned by an iterator's Next method when the iteration is +// complete; when there are no more items to return. +var Done = errors.New("no more items in iterator") + +// We don't support mixed calls to Next and NextPage because they play +// with the paging state in incompatible ways. +var errMixed = errors.New("iterator: Next and NextPage called on same iterator") + +// PageInfo contains information about an iterator's paging state. +type PageInfo struct { + // Token is the token used to retrieve the next page of items from the + // API. You may set Token immediately after creating an iterator to + // begin iteration at a particular point. If Token is the empty string, + // the iterator will begin with the first eligible item. + // + // The result of setting Token after the first call to Next is undefined. + // + // After the underlying API method is called to retrieve a page of items, + // Token is set to the next-page token in the response. + Token string + + // MaxSize is the maximum number of items returned by a call to the API. + // Set MaxSize as a hint to optimize the buffering behavior of the iterator. + // If zero, the page size is determined by the underlying service. + // + // Use Pager to retrieve a page of a specific, exact size. + MaxSize int + + // The error state of the iterator. Manipulated by PageInfo.next and Pager. + // This is a latch: it starts as nil, and once set should never change. + err error + + // If true, no more calls to fetch should be made. Set to true when fetch + // returns an empty page token. The iterator is Done when this is true AND + // the buffer is empty. + atEnd bool + + // Function that fetches a page from the underlying service. It should pass + // the pageSize and pageToken arguments to the service, fill the buffer + // with the results from the call, and return the next-page token returned + // by the service. The function must not remove any existing items from the + // buffer. If the underlying RPC takes an int32 page size, pageSize should + // be silently truncated. + fetch func(pageSize int, pageToken string) (nextPageToken string, err error) + + // Function that returns the number of currently buffered items. + bufLen func() int + + // Function that returns the buffer, after setting the buffer variable to nil. + takeBuf func() interface{} + + // Set to true on first call to PageInfo.next or Pager.NextPage. Used to check + // for calls to both Next and NextPage with the same iterator. + nextCalled, nextPageCalled bool +} + +// NewPageInfo exposes internals for iterator implementations. +// It is not a stable interface. +var NewPageInfo = newPageInfo + +// If an iterator can support paging, its iterator-creating method should call +// this (via the NewPageInfo variable above). +// +// The fetch, bufLen and takeBuf arguments provide access to the +// iterator's internal slice of buffered items. They behave as described in +// PageInfo, above. +// +// The return value is the PageInfo.next method bound to the returned PageInfo value. +// (Returning it avoids exporting PageInfo.next.) +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { + pi := &PageInfo{ + fetch: fetch, + bufLen: bufLen, + takeBuf: takeBuf, + } + return pi, pi.next +} + +// Remaining returns the number of items available before the iterator makes another API call. 
+func (pi *PageInfo) Remaining() int { return pi.bufLen() }
+
+// next provides support for an iterator's Next function. An iterator's Next
+// should return the error returned by next if non-nil; else it can assume
+// there is at least one item in its buffer, and it should return that item and
+// remove it from the buffer.
+func (pi *PageInfo) next() error {
+	pi.nextCalled = true
+	if pi.err != nil { // Once we get an error, always return it.
+		// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
+		return pi.err
+	}
+	if pi.nextPageCalled {
+		pi.err = errMixed
+		return pi.err
+	}
+	// Loop until we get some items or reach the end.
+	for pi.bufLen() == 0 && !pi.atEnd {
+		if err := pi.fill(pi.MaxSize); err != nil {
+			pi.err = err
+			return pi.err
+		}
+		if pi.Token == "" {
+			pi.atEnd = true
+		}
+	}
+	// Either the buffer is non-empty or pi.atEnd is true (or both).
+	if pi.bufLen() == 0 {
+		// The buffer is empty and pi.atEnd is true, i.e. the service has no
+		// more items.
+		pi.err = Done
+	}
+	return pi.err
+}
+
+// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
+// next-page token returned by the call.
+// If fill returns a non-nil error, the buffer will be empty.
+func (pi *PageInfo) fill(size int) error {
+	tok, err := pi.fetch(size, pi.Token)
+	if err != nil {
+		pi.takeBuf() // clear the buffer
+		return err
+	}
+	pi.Token = tok
+	return nil
+}
+
+// Pageable is implemented by iterators that support paging.
+type Pageable interface {
+	// PageInfo returns paging information associated with the iterator.
+	PageInfo() *PageInfo
+}
+
+// Pager supports retrieving iterator items a page at a time.
+type Pager struct {
+	pageInfo *PageInfo
+	pageSize int
+}
+
+// NewPager returns a pager that uses iter. Calls to its NextPage method will
+// obtain exactly pageSize items, unless fewer remain. The pageToken argument
+// indicates where to start the iteration. Pass the empty string to start at
+// the beginning, or pass a token retrieved from a call to Pager.NextPage.
+//
+// If you use an iterator with a Pager, you must not call Next on the iterator.
+func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
+	p := &Pager{
+		pageInfo: iter.PageInfo(),
+		pageSize: pageSize,
+	}
+	p.pageInfo.Token = pageToken
+	if pageSize <= 0 {
+		p.pageInfo.err = errors.New("iterator: page size must be positive")
+	}
+	return p
+}
+
+// NextPage retrieves a sequence of items from the iterator and appends them
+// to slicep, which must be a pointer to a slice of the iterator's item type.
+// Exactly p.pageSize items will be appended, unless fewer remain.
+//
+// The first return value is the page token to use for the next page of items.
+// If empty, there are no more pages. Aside from checking for the end of the
+// iteration, the returned page token is only needed if the iteration is to be
+// resumed at a later time, in another context (possibly another process).
+//
+// The second return value is non-nil if an error occurred. It will never be
+// the special iterator sentinel value Done. To recognize the end of the
+// iteration, compare nextPageToken to the empty string.
+//
+// It is possible for NextPage to return a single zero-length page along with
+// an empty page token when there are no more items in the iteration.
+func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) { + p.pageInfo.nextPageCalled = true + if p.pageInfo.err != nil { + return "", p.pageInfo.err + } + if p.pageInfo.nextCalled { + p.pageInfo.err = errMixed + return "", p.pageInfo.err + } + if p.pageInfo.bufLen() > 0 { + return "", errors.New("must call NextPage with an empty buffer") + } + // The buffer must be empty here, so takeBuf is a no-op. We call it just to get + // the buffer's type. + wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type()) + if slicep == nil { + return "", errors.New("nil passed to Pager.NextPage") + } + vslicep := reflect.ValueOf(slicep) + if vslicep.Type() != wantSliceType { + return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep) + } + for p.pageInfo.bufLen() < p.pageSize { + if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil { + p.pageInfo.err = err + return "", p.pageInfo.err + } + if p.pageInfo.Token == "" { + break + } + } + e := vslicep.Elem() + e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf()))) + return p.pageInfo.Token, nil +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 000000000..30c889a11 --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 000000000..74d3a4b5b --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
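A usage sketch for the two access patterns defined above; Thing, it, and use are hypothetical stand-ins for a generated item type, its iterator, and a consumer, and each snippet assumes a fresh iterator since Next and NextPage must not be mixed on the same one (errMixed):

    // Item at a time: loop until the Done sentinel.
    for {
        thing, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err
        }
        use(thing)
    }

    // Page at a time: exactly 50 items per call, unless fewer remain.
    pager := iterator.NewPager(it, 50, "")
    for {
        var page []*Thing
        token, err := pager.NextPage(&page)
        if err != nil {
            return err
        }
        use(page)
        if token == "" {
            break // no more pages
        }
    }
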
+ +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 000000000..e7ecfe3c8 --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,191 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package option contains options for Google API clients. +package option + +import ( + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. +func WithTokenSource(s oauth2.TokenSource) ClientOption { + return withTokenSource{s} +} + +type withTokenSource struct{ ts oauth2.TokenSource } + +func (w withTokenSource) Apply(o *internal.DialSettings) { + o.TokenSource = w.ts +} + +type withCredFile string + +func (w withCredFile) Apply(o *internal.DialSettings) { + o.CredentialsFile = string(w) +} + +// WithCredentialsFile returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials file. +func WithCredentialsFile(filename string) ClientOption { + return withCredFile(filename) +} + +// WithServiceAccountFile returns a ClientOption that uses a Google service +// account credentials file to authenticate. +// +// Deprecated: Use WithCredentialsFile instead. +func WithServiceAccountFile(filename string) ClientOption { + return WithCredentialsFile(filename) +} + +// WithCredentialsJSON returns a ClientOption that authenticates +// API calls with the given service account or refresh token JSON +// credentials. +func WithCredentialsJSON(p []byte) ClientOption { + return withCredentialsJSON(p) +} + +type withCredentialsJSON []byte + +func (w withCredentialsJSON) Apply(o *internal.DialSettings) { + o.CredentialsJSON = make([]byte, len(w)) + copy(o.CredentialsJSON, w) +} + +// WithEndpoint returns a ClientOption that overrides the default endpoint +// to be used for a service. +func WithEndpoint(url string) ClientOption { + return withEndpoint(url) +} + +type withEndpoint string + +func (w withEndpoint) Apply(o *internal.DialSettings) { + o.Endpoint = string(w) +} + +// WithScopes returns a ClientOption that overrides the default OAuth2 scopes +// to be used for a service. 
+func WithScopes(scope ...string) ClientOption {
+	return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Apply(o *internal.DialSettings) {
+	o.Scopes = make([]string, len(w))
+	copy(o.Scopes, w)
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+	return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
+
+// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
+// as the basis of communications. This option may only be used with services
+// that support HTTP as their communication transport. When used, the
+// WithHTTPClient option takes precedence over all other supplied options.
+func WithHTTPClient(client *http.Client) ClientOption {
+	return withHTTPClient{client}
+}
+
+type withHTTPClient struct{ client *http.Client }
+
+func (w withHTTPClient) Apply(o *internal.DialSettings) {
+	o.HTTPClient = w.client
+}
+
+// WithGRPCConn returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport. When
+// used, the WithGRPCConn option takes precedence over all other supplied
+// options.
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
+	return withGRPCConn{conn}
+}
+
+type withGRPCConn struct{ conn *grpc.ClientConn }
+
+func (w withGRPCConn) Apply(o *internal.DialSettings) {
+	o.GRPCConn = w.conn
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithGRPCConn.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+	return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
+	o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
+
+// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
+// connections that requests will be balanced between.
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithGRPCConnectionPool(size int) ClientOption {
+	return withGRPCConnectionPool(size)
+}
+
+type withGRPCConnectionPool int
+
+func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
+	balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
+	o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
+}
+
+// WithAPIKey returns a ClientOption that specifies an API key to be used
+// as the basis for authentication.
+//
+// API Keys can only be used for JSON-over-HTTP APIs, including those under
+// the import path google.golang.org/api/....
+func WithAPIKey(apiKey string) ClientOption {
+	return withAPIKey(apiKey)
+}
+
+type withAPIKey string
+
+func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
+
+// WithoutAuthentication returns a ClientOption that specifies that no
+// authentication should be used. It is suitable only for testing and for
+// accessing public resources, like public Google Cloud Storage buckets.
+// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
+// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
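A sketch of composing the options defined above when creating a client (imports elided); the credentials path, scope, and user agent are placeholders, and transport.NewHTTPClient is the helper added later in this patch:

    ctx := context.Background()
    client, endpoint, err := transport.NewHTTPClient(ctx,
        option.WithCredentialsFile("placeholder-creds.json"),
        option.WithScopes("https://www.googleapis.com/auth/monitoring.write"),
        option.WithUserAgent("example-app/0.1"),
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = endpoint // empty here; populated only when option.WithEndpoint is supplied
    _ = client
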
+func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go new file mode 100644 index 000000000..e80f09cef --- /dev/null +++ b/vendor/google.golang.org/api/support/bundler/bundler.go @@ -0,0 +1,349 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bundler supports bundling (batching) of items. Bundling amortizes an +// action with fixed costs over multiple items. For example, if an API provides +// an RPC that accepts a list of items as input, but clients would prefer +// adding items one at a time, then a Bundler can accept individual items from +// the client and bundle many of them into a single RPC. +// +// This package is experimental and subject to change without notice. +package bundler + +import ( + "errors" + "math" + "reflect" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +const ( + DefaultDelayThreshold = time.Second + DefaultBundleCountThreshold = 10 + DefaultBundleByteThreshold = 1e6 // 1M + DefaultBufferedByteLimit = 1e9 // 1G +) + +var ( + // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. + ErrOverflow = errors.New("bundler reached buffered byte limit") + + // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. + ErrOversizedItem = errors.New("item size exceeds bundle byte limit") +) + +// A Bundler collects items added to it into a bundle until the bundle +// exceeds a given size, then calls a user-provided function to handle the bundle. +type Bundler struct { + // Starting from the time that the first message is added to a bundle, once + // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. + DelayThreshold time.Duration + + // Once a bundle has this many items, handle the bundle. Since only one + // item at a time is added to a bundle, no bundle will exceed this + // threshold, so it also serves as a limit. The default is + // DefaultBundleCountThreshold. + BundleCountThreshold int + + // Once the number of bytes in current bundle reaches this threshold, handle + // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, + // but does not cap the total size of a bundle. + BundleByteThreshold int + + // The maximum size of a bundle, in bytes. Zero means unlimited. + BundleByteLimit int + + // The maximum number of bytes that the Bundler will keep in memory before + // returning ErrOverflow. The default is DefaultBufferedByteLimit. + BufferedByteLimit int + + // The maximum number of handler invocations that can be running at once. + // The default is 1. 
+	HandlerLimit int
+
+	handler       func(interface{}) // called to handle a bundle
+	itemSliceZero reflect.Value     // nil (zero value) for slice of items
+	flushTimer    *time.Timer       // implements DelayThreshold
+
+	mu        sync.Mutex
+	sem       *semaphore.Weighted // enforces BufferedByteLimit
+	semOnce   sync.Once
+	curBundle bundle // incoming items added to this bundle
+
+	// Each bundle is assigned a unique ticket that determines the order in which the
+	// handler is called. The ticket is assigned with mu locked, but waiting for tickets
+	// to be handled is done via mu2 and cond, below.
+	nextTicket uint64 // next ticket to be assigned
+
+	mu2         sync.Mutex
+	cond        *sync.Cond
+	nextHandled uint64 // next ticket to be handled
+
+	// In this implementation, active uses space proportional to HandlerLimit, and
+	// waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
+	// or release occurs, so large values of HandlerLimit may cause performance
+	// issues.
+	active map[uint64]bool // tickets of bundles actively being handled
+}
+
+type bundle struct {
+	items reflect.Value // slice of item type
+	size  int           // size in bytes of all items
+}
+
+// NewBundler creates a new Bundler.
+//
+// itemExample is a value of the type that will be bundled. For example, if you
+// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
+//
+// handler is a function that will be called on each bundle. If itemExample is
+// of type T, the argument to handler is of type []T. handler is called on
+// bundles in the order they were closed; calls are sequential by default, but
+// setting HandlerLimit above 1 allows that many invocations to run in parallel.
+//
+// Configure the Bundler by setting its thresholds and limits before calling
+// any of its methods.
+func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
+	b := &Bundler{
+		DelayThreshold:       DefaultDelayThreshold,
+		BundleCountThreshold: DefaultBundleCountThreshold,
+		BundleByteThreshold:  DefaultBundleByteThreshold,
+		BufferedByteLimit:    DefaultBufferedByteLimit,
+		HandlerLimit:         1,
+
+		handler:       handler,
+		itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
+		active:        map[uint64]bool{},
+	}
+	b.curBundle.items = b.itemSliceZero
+	b.cond = sync.NewCond(&b.mu2)
+	return b
+}
+
+func (b *Bundler) initSemaphores() {
+	// Create the semaphores lazily, because the user may set limits
+	// after NewBundler.
+	b.semOnce.Do(func() {
+		b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
+	})
+}
+
+// Add adds item to the current bundle. It marks the bundle for handling and
+// starts a new one if any of the thresholds or limits are exceeded.
+//
+// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
+// the item can never be handled. Add returns ErrOversizedItem in this case.
+//
+// If adding the item would exceed the maximum memory allowed
+// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
+// memory, Add returns ErrOverflow.
+//
+// Add never blocks.
+func (b *Bundler) Add(item interface{}, size int) error {
+	// If this item exceeds the maximum size of a bundle,
+	// we can never send it.
+	if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
+		return ErrOversizedItem
+	}
+	// If adding this item would exceed our allotted memory
+	// footprint, we can't accept it.
+	// (TryAcquire also returns false if anything is waiting on the semaphore,
+	// so calls to Add and AddWait shouldn't be mixed.)
+ b.initSemaphores() + if !b.sem.TryAcquire(int64(size)) { + return ErrOverflow + } + b.add(item, size) + return nil +} + +// add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +func (b *Bundler) add(item interface{}, size int) { + b.mu.Lock() + defer b.mu.Unlock() + + // If adding this item to the current bundle would cause it to exceed the + // maximum bundle size, close the current bundle and start a new one. + if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { + b.startFlushLocked() + } + // Add the item. + b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) + b.curBundle.size += size + + // Start a timer to flush the item if one isn't already running. + // startFlushLocked clears the timer and closes the bundle at the same time, + // so we only allocate a new timer for the first item in each bundle. + // (We could try to call Reset on the timer instead, but that would add a lot + // of complexity to the code just to save one small allocation.) + if b.flushTimer == nil { + b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush) + } + + // If the current bundle equals the count threshold, close it. + if b.curBundle.items.Len() == b.BundleCountThreshold { + b.startFlushLocked() + } + // If the current bundle equals or exceeds the byte threshold, close it. + if b.curBundle.size >= b.BundleByteThreshold { + b.startFlushLocked() + } +} + +// AddWait adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. AddWait returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), +// AddWait blocks until space is available or ctx is done. +// +// Calls to Add and AddWait should not be mixed on the same Bundler. +func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory footprint, block + // until space is available. The semaphore is FIFO, so there will be no + // starvation. + b.initSemaphores() + if err := b.sem.Acquire(ctx, int64(size)); err != nil { + return err + } + // Here, we've reserved space for item. Other goroutines can call AddWait + // and even acquire space, but no one can take away our reservation + // (assuming sem.Release is used correctly). So there is no race condition + // resulting from locking the mutex after sem.Acquire returns. + b.add(item, size) + return nil +} + +// Flush invokes the handler for all remaining items in the Bundler and waits +// for it to return. +func (b *Bundler) Flush() { + b.mu.Lock() + b.startFlushLocked() + // Here, all bundles with tickets < b.nextTicket are + // either finished or active. Those are the ones + // we want to wait for. + t := b.nextTicket + b.mu.Unlock() + b.initSemaphores() + b.waitUntilAllHandled(t) +} + +func (b *Bundler) startFlushLocked() { + if b.flushTimer != nil { + b.flushTimer.Stop() + b.flushTimer = nil + } + if b.curBundle.items.Len() == 0 { + return + } + // Here, both semaphores must have been initialized. 
+ bun := b.curBundle + b.curBundle = bundle{items: b.itemSliceZero} + ticket := b.nextTicket + b.nextTicket++ + go func() { + defer func() { + b.sem.Release(int64(bun.size)) + b.release(ticket) + }() + b.acquire(ticket) + b.handler(bun.items.Interface()) + }() +} + +// acquire blocks until ticket is the next to be served, then returns. In order for N +// acquire calls to return, the tickets must be in the range [0, N). A ticket must +// not be presented to acquire more than once. +func (b *Bundler) acquire(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if ticket < b.nextHandled { + panic("bundler: acquire: arg too small") + } + for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) { + b.cond.Wait() + } + // Here, + // ticket == b.nextHandled: the caller is the next one to be handled; + // and len(b.active) < b.HandlerLimit: there is space available. + b.active[ticket] = true + b.nextHandled++ + // Broadcast, not Signal: although at most one acquire waiter can make progress, + // there might be waiters in waitUntilAllHandled. + b.cond.Broadcast() +} + +// If a ticket is used for a call to acquire, it must later be passed to release. A +// ticket must not be presented to release more than once. +func (b *Bundler) release(ticket uint64) { + b.mu2.Lock() + defer b.mu2.Unlock() + if !b.active[ticket] { + panic("bundler: release: not an active ticket") + } + delete(b.active, ticket) + b.cond.Broadcast() +} + +// waitUntilAllHandled blocks until all tickets < n have called release, meaning +// all bundles with tickets < n have been handled. +func (b *Bundler) waitUntilAllHandled(n uint64) { + // Proof of correctness of this function. + // "N is acquired" means acquire(N) has returned. + // "N is released" means release(N) has returned. + // 1. If N is acquired, N-1 is acquired. + // Follows from the loop test in acquire, and the fact + // that nextHandled is incremented by 1. + // 2. If nextHandled >= N, then N-1 is acquired. + // Because we only increment nextHandled to N after N-1 is acquired. + // 3. If nextHandled >= N, then all n < N is acquired. + // Follows from #1 and #2. + // 4. If N is acquired and N is not in active, then N is released. + // Because we put N in active before acquire returns, and only + // remove it when it is released. + // Let min(active) be the smallest member of active, or infinity if active is empty. + // 5. If nextHandled >= N and N <= min(active), then all n < N is released. + // From nextHandled >= N and #3, all n < N is acquired. + // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active. + // So from #4, all n < N is released. + // The loop test below is the antecedent of #5. + b.mu2.Lock() + defer b.mu2.Unlock() + for !(b.nextHandled >= n && n <= min(b.active)) { + b.cond.Wait() + } +} + +// min returns the minimum value of the set s, or the largest uint64 if +// s is empty. +func min(s map[uint64]bool) uint64 { + var m uint64 = math.MaxUint64 + for n := range s { + if n < m { + m = n + } + } + return m +} diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go new file mode 100644 index 000000000..c45ff5b2c --- /dev/null +++ b/vendor/google.golang.org/api/transport/dial.go @@ -0,0 +1,49 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
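Pulling the bundler pieces above together, a small runnable sketch; the threshold and items are arbitrary:

    package main

    import (
        "fmt"

        "google.golang.org/api/support/bundler"
    )

    func main() {
        // itemExample is a string, so the handler receives a []string.
        b := bundler.NewBundler("", func(items interface{}) {
            fmt.Println("bundle:", items.([]string))
        })
        b.BundleCountThreshold = 3 // close a bundle after three items

        for _, s := range []string{"a", "b", "c", "d"} {
            if err := b.Add(s, len(s)); err != nil {
                fmt.Println("add:", err)
            }
        }
        b.Flush() // handle the remaining partial bundle and wait for the handler
    }
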
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport supports network connections to HTTP and GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package transport + +import ( + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc" + + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + htransport "google.golang.org/api/transport/http" +) + +// NewHTTPClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + return htransport.NewClient(ctx, opts...) +} + +// DialGRPC returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.Dial(ctx, opts...) +} + +// DialGRPCInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return gtransport.DialInsecure(ctx, opts...) +} diff --git a/vendor/google.golang.org/api/transport/go19.go b/vendor/google.golang.org/api/transport/go19.go new file mode 100644 index 000000000..40e3389d9 --- /dev/null +++ b/vendor/google.golang.org/api/transport/go19.go @@ -0,0 +1,34 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package transport + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.Credentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. 
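For illustration, resolving credentials through the options with the Creds helper below (imports elided); the scope is a placeholder, and ProjectID may be empty for some credential types:

    creds, err := transport.Creds(ctx,
        option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(creds.ProjectID)
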
+func Creds(ctx context.Context, opts ...option.ClientOption) (*google.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go new file mode 100644 index 000000000..c79b835b6 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -0,0 +1,91 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport/grpc supports network connections to GRPC servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package grpc + +import ( + "errors" + "log" + + "golang.org/x/net/context" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" +) + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineDialerHook func(context.Context) grpc.DialOption + +// Dial returns a GRPC connection for use communicating with a Google cloud +// service, configured with the given ClientOptions. +func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, false, opts) +} + +// DialInsecure returns an insecure GRPC connection for use communicating +// with fake or mock Google cloud service implementations, such as emulators. +// The connection is configured with the given ClientOptions. +func DialInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, true, opts) +} + +func dial(ctx context.Context, insecure bool, opts []option.ClientOption) (*grpc.ClientConn, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.HTTPClient != nil { + return nil, errors.New("unsupported HTTP client specified") + } + if o.GRPCConn != nil { + return o.GRPCConn, nil + } + var grpcOpts []grpc.DialOption + if insecure { + grpcOpts = []grpc.DialOption{grpc.WithInsecure()} + } else if !o.NoAuth { + if o.APIKey != "" { + log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") + } + creds, err := internal.Creds(ctx, &o) + if err != nil { + return nil, err + } + grpcOpts = []grpc.DialOption{ + grpc.WithPerRPCCredentials(oauth.TokenSource{creds.TokenSource}), + grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), + } + } + if appengineDialerHook != nil { + // Use the Socket API on App Engine. + grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) + } + // Add tracing, but before the other options, so that clients can override the + // gRPC stats handler. + // This assumes that gRPC options are processed in order, left to right. 
+ grpcOpts = addOCStatsHandler(grpcOpts) + grpcOpts = append(grpcOpts, o.GRPCDialOpts...) + if o.UserAgent != "" { + grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) + } + return grpc.DialContext(ctx, o.Endpoint, grpcOpts...) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go new file mode 100644 index 000000000..b57e7dcbb --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go @@ -0,0 +1,41 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package grpc + +import ( + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/appengine" + "google.golang.org/appengine/socket" + "google.golang.org/grpc" +) + +func init() { + // NOTE: dev_appserver doesn't currently support SSL. + // When it does, this code can be removed. + if appengine.IsDevAppServer() { + return + } + + appengineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} diff --git a/vendor/google.golang.org/api/transport/grpc/go18.go b/vendor/google.golang.org/api/transport/grpc/go18.go new file mode 100644 index 000000000..4b29f8ea4 --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/go18.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package grpc + +import ( + "go.opencensus.io/plugin/ocgrpc" + "google.golang.org/grpc" +) + +func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) +} diff --git a/vendor/google.golang.org/api/transport/grpc/not_go18.go b/vendor/google.golang.org/api/transport/grpc/not_go18.go new file mode 100644 index 000000000..01aba678e --- /dev/null +++ b/vendor/google.golang.org/api/transport/grpc/not_go18.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
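A sketch of the insecure dial path above, as one might use against a local emulator (imports elided); the address is a placeholder:

    conn, err := transport.DialGRPCInsecure(ctx,
        option.WithEndpoint("localhost:8085"))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
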
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package grpc + +import "google.golang.org/grpc" + +func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { return opts } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 000000000..1ecbd9761 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,138 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport/http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, settings.Endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, settings.Endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + trans := base + trans = userAgentTransport{ + base: trans, + userAgent: settings.UserAgent, + } + trans = addOCTransport(trans) + switch { + case settings.NoAuth: + // Do nothing. 
+ case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + trans = &oauth2.Transport{ + Base: trans, + Source: creds.TokenSource, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type userAgentTransport struct { + userAgent string + base http.RoundTripper +} + +func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + if t.userAgent == "" { + return rt.RoundTrip(req) + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + // TODO(cbro): append to existing User-Agent header? + newReq.Header["User-Agent"] = []string{t.userAgent} + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. +func defaultBaseTransport(ctx context.Context) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + return http.DefaultTransport +} diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go new file mode 100644 index 000000000..313e0bd5b --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial_appengine.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package http + +import ( + "net/http" + + "golang.org/x/net/context" + "google.golang.org/appengine/urlfetch" +) + +func init() { + appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { + return &urlfetch.Transport{Context: ctx} + } +} diff --git a/vendor/google.golang.org/api/transport/http/go18.go b/vendor/google.golang.org/api/transport/http/go18.go new file mode 100644 index 000000000..14ef6e9e7 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/go18.go @@ -0,0 +1,31 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
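And a sketch of NewTransport above, wrapping a caller-supplied base RoundTripper with the User-Agent, OpenCensus, and auth layers (imports elided); htransport is the import alias used elsewhere in this patch, and the settings are placeholders:

    base := &http.Transport{MaxIdleConnsPerHost: 4}
    rt, err := htransport.NewTransport(ctx, base,
        option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),
        option.WithUserAgent("example-app/0.1"),
    )
    if err != nil {
        log.Fatal(err)
    }
    client := &http.Client{Transport: rt}
    _ = client
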
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package http + +import ( + "net/http" + + "go.opencensus.io/plugin/ochttp" + "google.golang.org/api/transport/http/internal/propagation" +) + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { + return &ochttp.Transport{ + Base: trans, + Propagation: &propagation.HTTPFormat{}, + } +} diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go new file mode 100644 index 000000000..24b4f0d29 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go @@ -0,0 +1,96 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +// Package propagation implements X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. 
+ if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/api/transport/http/not_go18.go b/vendor/google.golang.org/api/transport/http/not_go18.go new file mode 100644 index 000000000..b8e1abe92 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/not_go18.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package http + +import "net/http" + +func addOCTransport(trans http.RoundTripper) http.RoundTripper { return trans } diff --git a/vendor/google.golang.org/api/transport/not_go19.go b/vendor/google.golang.org/api/transport/not_go19.go new file mode 100644 index 000000000..73de8aa7f --- /dev/null +++ b/vendor/google.golang.org/api/transport/not_go19.go @@ -0,0 +1,34 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package transport + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +// Creds constructs a google.DefaultCredentials from the information in the options, +// or obtains the default credentials in the same way as google.FindDefaultCredentials. +func Creds(ctx context.Context, opts ...option.ClientOption) (*google.DefaultCredentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.Creds(ctx, &ds) +} diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
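To make the X-Cloud-Trace-Context wire format handled by the propagation package earlier in this patch concrete, here is a standalone sketch of the same parse: "<32 hex chars of trace id>/<decimal span id>;o=<options>", with the ";o=..." part optional. The parseTraceContext helper and sample header value are illustrative; the vendored HTTPFormat is the real implementation.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)

// parseTraceContext re-implements the header parse in isolation. The length
// check on the decoded trace id is slightly stricter than the vendored parser.
func parseTraceContext(h string) (traceID [16]byte, spanID [8]byte, opts uint64, ok bool) {
	slash := strings.Index(h, "/")
	if slash == -1 {
		return
	}
	tid, rest := h[:slash], h[slash+1:]
	buf, err := hex.DecodeString(tid)
	if err != nil || len(buf) != 16 {
		return
	}
	copy(traceID[:], buf)
	spanstr := rest
	if semi := strings.Index(rest, ";"); semi != -1 {
		spanstr, rest = rest[:semi], rest[semi+1:]
	} else {
		rest = ""
	}
	sid, err := strconv.ParseUint(spanstr, 10, 64)
	if err != nil {
		return
	}
	binary.BigEndian.PutUint64(spanID[:], sid)
	if strings.HasPrefix(rest, "o=") {
		opts, _ = strconv.ParseUint(rest[2:], 10, 64)
	}
	return traceID, spanID, opts, true
}

func main() {
	tid, sid, o, ok := parseTraceContext("105445aa7843bc8bf206b12000100000/123;o=1")
	fmt.Println(ok, hex.EncodeToString(tid[:]), binary.BigEndian.Uint64(sid[:]), o)
}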
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 000000000..76dedc81d --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,113 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. + +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// On App Engine Standard it ensures the server has started and is prepared to +// receive requests. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return internal.ReqContext(req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. 
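A hedged sketch of what the override hook enables, using WithAPICallFunc (defined just below) to route every API call through a logging stub, as a test might. Passing nil request and response messages is acceptable here only because the stub ignores them; the service and method names are examples.

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/appengine"
)

// withLoggingStub replaces the normal RPC path with a stub that just logs.
// Returning nil pretends every call succeeded; out stays zero-valued.
func withLoggingStub(ctx context.Context) context.Context {
	return appengine.WithAPICallFunc(ctx,
		func(ctx context.Context, service, method string, in, out proto.Message) error {
			log.Printf("stubbed call: %s.%s", service, method)
			return nil
		})
}

func main() {
	ctx := withLoggingStub(context.Background())
	// nil messages are fine only because the stub ignores them.
	_ = appengine.APICall(ctx, "modules", "GetHostname", nil, nil)
}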
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 000000000..f4b645aad --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works in App Engine "flexible environment". +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 000000000..16d0772e2 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 000000000..b8dcf8f36 --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). 
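A short illustration of how MultiError above surfaces per-element batch failures; the error strings are made up for the example.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/appengine"
)

func main() {
	// Entries correspond one-to-one with the batch inputs; nil means success.
	me := appengine.MultiError{nil, errors.New("bad key"), errors.New("timeout")}
	fmt.Println(me.Error()) // "bad key (and 1 other error)"
	for i, err := range me {
		if err != nil {
			fmt.Printf("element %d failed: %v\n", i, err)
		}
	}
}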
+func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. 
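A minimal sketch of minting a token with AccessToken above from inside a request handler; the /token route and the cloud-platform scope are example choices, not mandated by the API.

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func tokenHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r) // must be an in-flight request context
	tok, expiry, err := appengine.AccessToken(ctx,
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "token of length %d expires at %s", len(tok), expiry)
}

func main() {
	http.HandleFunc("/token", tokenHandler)
	appengine.Main()
}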
+func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). +func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 000000000..16f87c5d3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,660 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine +// +build go1.7 + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. 
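A sketch combining SignBytes and PublicCertificates above: sign a payload, then locate the certificate whose KeyName matches, which a verifier would check the signature against. The handler wiring and payload are illustrative.

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func signHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	keyName, sig, err := appengine.SignBytes(ctx, []byte("payload"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	certs, err := appengine.PublicCertificates(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, c := range certs {
		if c.KeyName == keyName { // the cert a verifier would use
			fmt.Fprintf(w, "%d-byte signature by key %q; cert is %d bytes of PEM",
				len(sig), keyName, len(c.Data))
		}
	}
}

func main() {
	http.HandleFunc("/sign", signHandler)
	appengine.Main()
}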
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } + + defaultTicketOnce sync.Once + defaultTicket string + backgroundContextOnce sync.Once + backgroundContext netcontext.Context +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. 
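The RemoteAddr patching in handleHTTP above reduces to this standalone normalization rule, shown with an IPv6 literal to make the bracketing visible:

package main

import (
	"fmt"
	"net"
)

// normalize applies the same rule as handleHTTP: if the address has no
// port, append a default so it satisfies net/http's "IP:port" contract.
func normalize(addr string) string {
	if _, _, err := net.SplitHostPort(addr); err != nil {
		return net.JoinHostPort(addr, "80")
	}
	return addr
}

func main() {
	fmt.Println(normalize("2001:db8::1"))    // [2001:db8::1]:80
	fmt.Println(normalize("127.0.0.1:8080")) // already host:port; unchanged
}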
+ const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +// jointContext joins two contexts in a superficial way. +// It takes values and timeouts from a base context, and only values from another context. +type jointContext struct { + base netcontext.Context + valuesOnly netcontext.Context +} + +func (c jointContext) Deadline() (time.Time, bool) { + return c.base.Deadline() +} + +func (c jointContext) Done() <-chan struct{} { + return c.base.Done() +} + +func (c jointContext) Err() error { + return c.base.Err() +} + +func (c jointContext) Value(key interface{}) interface{} { + if val := c.base.Value(key); val != nil { + return val + } + return c.valuesOnly.Value(key) +} + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return req.Context() +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + return jointContext{ + base: parent, + valuesOnly: req.Context(), + } +} + +// DefaultTicket returns a ticket used for background context or dev_appserver. +func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket +} + +func BackgroundContext() netcontext.Context { + backgroundContextOnce.Do(func() { + // Compute background security ticket. 
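A self-contained restatement of the jointContext type above, runnable outside App Engine, showing that cancellation and deadlines come from the base context while value lookups fall through to the second context. The key type and values are illustrative.

package main

import (
	"context"
	"fmt"
	"time"
)

// jointCtx restates jointContext: deadline/cancellation from base, with
// value lookups falling through to valuesOnly when base has no entry.
type jointCtx struct {
	base       context.Context
	valuesOnly context.Context
}

func (c jointCtx) Deadline() (time.Time, bool) { return c.base.Deadline() }
func (c jointCtx) Done() <-chan struct{}       { return c.base.Done() }
func (c jointCtx) Err() error                  { return c.base.Err() }
func (c jointCtx) Value(key interface{}) interface{} {
	if v := c.base.Value(key); v != nil {
		return v
	}
	return c.valuesOnly.Value(key)
}

func main() {
	type key string
	vals := context.WithValue(context.Background(), key("user"), "alice")
	base, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	ctx := jointCtx{base: base, valuesOnly: vals}
	fmt.Println(ctx.Value(key("user"))) // alice, via valuesOnly
	_, hasDeadline := ctx.Deadline()
	fmt.Println(hasDeadline) // true, via base
}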
+ ticket := DefaultTicket() + + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + backgroundContext = toContext(c) + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go c.logFlusher(make(chan int)) + }) + + return backgroundContext +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { + c := &context{ + req: req, + apiURL: apiURL, + } + ctx := withContext(decorate(req.Context()), c) + req = req.WithContext(ctx) + c.req = req + return req, func() {} +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) + return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. 
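bodyAllowedForStatus above encodes the HTTP rule that 1xx, 204, and 304 responses carry no body; a compact standalone check:

package main

import "fmt"

// bodyAllowed restates the same status-code rule.
func bodyAllowed(status int) bool {
	switch {
	case status >= 100 && status <= 199, status == 204, status == 304:
		return false
	}
	return true
}

func main() {
	for _, s := range []int{100, 200, 204, 304, 404} {
		fmt.Println(s, bodyAllowed(s))
	}
}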
+ if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. + if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. + if ticket == "" { + ticket = DefaultTicket() + } + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. 
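The timeout selection at the top of Call above, restated as a small helper: prefer the context deadline, fall back to the 60-second default. time.Until is used here in place of deadline.Sub(time.Now()); the behavior is the same.

package main

import (
	"context"
	"fmt"
	"time"
)

// rpcTimeout mirrors the timeout selection in Call: the context deadline
// wins when present, otherwise the 60-second default applies.
func rpcTimeout(ctx context.Context) time.Duration {
	timeout := 60 * time.Second
	if deadline, ok := ctx.Deadline(); ok {
		timeout = time.Until(deadline)
	}
	return timeout
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println(rpcTimeout(ctx).Round(time.Second)) // ~5s
	fmt.Println(rpcTimeout(context.Background()))   // 1m0s
}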
+ const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) + c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 000000000..f0f40b2e3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,169 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
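The batching rule in flushLog above, isolated: consume queued lines front-to-back until the next one would exceed the byte budget, leaving the remainder for the next flush. The integer sizes here stand in for proto.Size(ll) + 3.

package main

import "fmt"

// takeBatch consumes sizes front-to-back until the next entry would
// exceed the remaining budget, returning the batch and the leftover.
func takeBatch(sizes []int, budget int) (batch, rest []int) {
	n, rem := 0, budget
	for ; n < len(sizes); n++ {
		if sizes[n] > rem {
			break
		}
		rem -= sizes[n]
	}
	return sizes[:n], sizes[n:]
}

func main() {
	batch, rest := takeBatch([]int{10, 20, 15, 40}, 40)
	fmt.Println(batch, rest) // [10 20] [15 40]
}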
+ +// +build appengine + +package internal + +import ( + "errors" + "fmt" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. +func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { + c := fromContext(ctx) + if c == nil { + return nil, errNotAppEngineContext + } + return c, nil +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return WithContext(netcontext.Background(), req) +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +type testingContext struct { + appengine.Context + + req *http.Request +} + +func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } +func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { + if service == "__go__" && method == "GetNamespace" { + return nil + } + return fmt.Errorf("testingContext: unsupported Call") +} +func (t *testingContext) Request() interface{} { return t.req } + +func ContextForTesting(req *http.Request) netcontext.Context { + return withContext(netcontext.Background(), &testingContext{req: req}) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. 
+ if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 000000000..e0c0b214b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,123 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "errors" + "os" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var errNotAppEngineContext = errors.New("not an App Engine context") + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. 
+ f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + logf(c, level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + return withNamespace(ctx, namespace) +} + +// SetTestEnv sets the env variables for testing background ticket in Flex. +func SetTestEnv() func() { + var environ = []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + return func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go new file mode 100644 index 000000000..028b4f056 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_pre17.go @@ -0,0 +1,682 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine +// +build !go1.7 + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" +) + +var ( + // Incoming headers. 
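The context-key idiom used throughout this package (callOverrideKey, namespaceKey, appIDOverrideKey) in isolation: taking the address of a package-level string yields a key no other package can collide with. This sketch mirrors withNamespace/NamespaceFromContext using the standard context package.

package main

import (
	"context"
	"fmt"
)

// Taking the address of a package-level string gives a context key that
// cannot collide with any other package's keys.
var nsKey = "holds the namespace string"

func withNS(ctx context.Context, ns string) context.Context {
	return context.WithValue(ctx, &nsKey, ns)
}

func nsFrom(ctx context.Context) string {
	ns, _ := ctx.Value(&nsKey).(string)
	return ns
}

func main() {
	ctx := withNS(context.Background(), "tenant-a")
	fmt.Println(nsFrom(ctx))                        // tenant-a
	fmt.Println(nsFrom(context.Background()) == "") // true: absent -> ""
}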
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. + apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } + + defaultTicketOnce sync.Once + defaultTicket string +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. 
+ if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. + const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return WithContext(netcontext.Background(), req) +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +// DefaultTicket returns a ticket used for background context or dev_appserver. 
+func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + ticket := DefaultTicket() + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return req, func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) 
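bodyAllowedForStatus above encodes the HTTP rule that 1xx, 204, and 304 responses must not carry a body, which is why Write (continuing just below) returns http.ErrBodyNotAllowed for those codes. A short standalone check of the same helper:

package main

import "fmt"

// bodyAllowedForStatus mirrors the vendored helper above: informational
// (1xx), No Content (204), and Not Modified (304) responses have no body.
func bodyAllowedForStatus(status int) bool {
	switch {
	case status >= 100 && status <= 199:
		return false
	case status == 204:
		return false
	case status == 304:
		return false
	}
	return true
}

func main() {
	for _, code := range []int{100, 200, 204, 304, 500} {
		fmt.Printf("%d: body allowed = %v\n", code, bodyAllowedForStatus(code))
	}
}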
+ return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. + if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. + if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
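The post method above serializes the call deadline into the X-Google-RPC-Service-Deadline header as decimal seconds. A two-line illustration of that encoding (the ticket fallback described in the preceding comment continues right after this note):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// The deadline header value is the timeout rendered as decimal
	// seconds with no trailing zeros ('f' format, precision -1).
	timeout := 2500 * time.Millisecond
	fmt.Println(strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64))            // "2.5"
	fmt.Println(strconv.FormatFloat((60 * time.Second).Seconds(), 'f', -1, 64)) // "60"
}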
+ if ticket == "" { + ticket = DefaultTicket() + } + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. + const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
+ c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 000000000..11df8c07b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". +func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 000000000..9a2ff77ab --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,611 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
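parseFullAppID in the app_id.go file added above splits a full App Engine app ID of the form "partition~domain:displayID", where both the partition and the domain are optional. A runnable copy of the helper with two sample inputs:

package main

import (
	"fmt"
	"strings"
)

// parseFullAppID is copied from app_id.go above: it peels off an optional
// "partition~" prefix, then an optional "domain:" prefix.
func parseFullAppID(appid string) (partition, domain, displayID string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

func main() {
	fmt.Println(parseFullAppID("s~example.com:myapp")) // s example.com myapp
	fmt.Println(parseFullAppID("myapp"))               //   myapp
}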
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} +func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} +} + +type AppIdentityServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} +func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} +} +func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) +} +func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) +} +func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) +} +func (m *AppIdentityServiceError) XXX_Size() int { + return xxx_messageInfo_AppIdentityServiceError.Size(m) +} +func (m *AppIdentityServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` 
+ XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} +func (*SignForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} +} +func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) +} +func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) +} +func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppRequest.Merge(dst, src) +} +func (m *SignForAppRequest) XXX_Size() int { + return xxx_messageInfo_SignForAppRequest.Size(m) +} +func (m *SignForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} +func (*SignForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} +} +func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) +} +func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) +} +func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppResponse.Merge(dst, src) +} +func (m *SignForAppResponse) XXX_Size() int { + return xxx_messageInfo_SignForAppResponse.Size(m) +} +func (m *SignForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} +func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} +} +func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) +} +func (m 
*GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) +} +func (m *GetPublicCertificateForAppRequest) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) +} +func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} +func (*PublicCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} +} +func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) +} +func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) +} +func (dst *PublicCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicCertificate.Merge(dst, src) +} +func (m *PublicCertificate) XXX_Size() int { + return xxx_messageInfo_PublicCertificate.Size(m) +} +func (m *PublicCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_PublicCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} +func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} +} +func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) +} +func (m *GetPublicCertificateForAppResponse) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) +} +func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} +func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} +} +func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) +} +func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) +} +func (m *GetServiceAccountNameRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) +} +func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} +func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} +} +func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) +} +func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) +} +func (m *GetServiceAccountNameResponse) 
XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) +} +func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} +func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} +} +func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) +} +func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) +} +func (m *GetAccessTokenRequest) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenRequest.Size(m) +} +func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo + +func (m *GetAccessTokenRequest) GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} +func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} +} +func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) +} +func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) +} +func (dst 
*GetAccessTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) +} +func (m *GetAccessTokenResponse) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenResponse.Size(m) +} +func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} +func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} +func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { + proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") + proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") + proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") + proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") + proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") + proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") + proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") + proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") + proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") + proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") + proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") + proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) +} + +var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ + // 676 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, + 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e, + 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, + 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, + 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, + 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, + 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, + 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, + 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, + 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, + 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, + 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, + 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, + 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, + 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, + 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, + 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, + 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, 
+ 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, + 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, + 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, + 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, + 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, + 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, + 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, + 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, + 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, + 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, + 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, + 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, + 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, + 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, + 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, + 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, + 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, + 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, + 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, + 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, + 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, + 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, + 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, + 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, + 0xf3, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 000000000..db4777e68 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,308 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/base/api_base.proto + +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
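Every generated message in these files pairs its pointer fields with a getter (GetValue, GetKeyName, and so on) that tolerates both a nil receiver and an unset field; that is what lets calling code chain accessors without nil checks. A reduced sketch of the convention on a stand-in struct:

package main

import "fmt"

type StringProto struct {
	Value *string
}

// GetValue follows the generated-getter convention above: it is safe on a
// nil receiver and on a nil field, returning the zero value in both cases.
func (m *StringProto) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}

func main() {
	var m *StringProto
	fmt.Printf("%q\n", m.GetValue()) // "" even on a nil receiver
	v := "hello"
	fmt.Printf("%q\n", (&StringProto{Value: &v}).GetValue()) // "hello"
}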
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} +func (*StringProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} +} +func (m *StringProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringProto.Unmarshal(m, b) +} +func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) +} +func (dst *StringProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringProto.Merge(dst, src) +} +func (m *StringProto) XXX_Size() int { + return xxx_messageInfo_StringProto.Size(m) +} +func (m *StringProto) XXX_DiscardUnknown() { + xxx_messageInfo_StringProto.DiscardUnknown(m) +} + +var xxx_messageInfo_StringProto proto.InternalMessageInfo + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} +func (*Integer32Proto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} +} +func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) +} +func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) +} +func (dst *Integer32Proto) XXX_Merge(src proto.Message) { + xxx_messageInfo_Integer32Proto.Merge(dst, src) +} +func (m *Integer32Proto) XXX_Size() int { + return xxx_messageInfo_Integer32Proto.Size(m) +} +func (m *Integer32Proto) XXX_DiscardUnknown() { + xxx_messageInfo_Integer32Proto.DiscardUnknown(m) +} + +var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} +func (*Integer64Proto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} +} +func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) +} +func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) +} +func (dst *Integer64Proto) XXX_Merge(src proto.Message) { + xxx_messageInfo_Integer64Proto.Merge(dst, src) +} +func (m *Integer64Proto) XXX_Size() int 
{ + return xxx_messageInfo_Integer64Proto.Size(m) +} +func (m *Integer64Proto) XXX_DiscardUnknown() { + xxx_messageInfo_Integer64Proto.DiscardUnknown(m) +} + +var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} +func (*BoolProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} +} +func (m *BoolProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolProto.Unmarshal(m, b) +} +func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) +} +func (dst *BoolProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolProto.Merge(dst, src) +} +func (m *BoolProto) XXX_Size() int { + return xxx_messageInfo_BoolProto.Size(m) +} +func (m *BoolProto) XXX_DiscardUnknown() { + xxx_messageInfo_BoolProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolProto proto.InternalMessageInfo + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} +func (*DoubleProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} +} +func (m *DoubleProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleProto.Unmarshal(m, b) +} +func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) +} +func (dst *DoubleProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleProto.Merge(dst, src) +} +func (m *DoubleProto) XXX_Size() int { + return xxx_messageInfo_DoubleProto.Size(m) +} +func (m *DoubleProto) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleProto proto.InternalMessageInfo + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} +func (*BytesProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} +} +func (m *BytesProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesProto.Unmarshal(m, b) +} +func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) +} +func (dst 
*BytesProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesProto.Merge(dst, src) +} +func (m *BytesProto) XXX_Size() int { + return xxx_messageInfo_BytesProto.Size(m) +} +func (m *BytesProto) XXX_DiscardUnknown() { + xxx_messageInfo_BytesProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesProto proto.InternalMessageInfo + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} +func (*VoidProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} +} +func (m *VoidProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VoidProto.Unmarshal(m, b) +} +func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) +} +func (dst *VoidProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoidProto.Merge(dst, src) +} +func (m *VoidProto) XXX_Size() int { + return xxx_messageInfo_VoidProto.Size(m) +} +func (m *VoidProto) XXX_DiscardUnknown() { + xxx_messageInfo_VoidProto.DiscardUnknown(m) +} + +var xxx_messageInfo_VoidProto proto.InternalMessageInfo + +func init() { + proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") + proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") + proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") + proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") + proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") + proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") + proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) +} + +var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ + // 199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, + 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, + 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6, + 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62, + 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71, + 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe, + 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1, + 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71, + 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d, + 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff, + 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d, + 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba, + 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go 
b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 000000000..2fb748289 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,4367 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto + +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} +func (Property_Meaning) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} 
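The enums in this file all follow the same generated shape: because proto2 optional enum fields are pointers, the generator emits, alongside the name/value maps, an Enum helper that returns a pointer to a copy of the value. A compact sketch of that helper on a hypothetical stand-in type:

package main

import "fmt"

type Tokenization int32

const (
	HTML Tokenization = 1
	Atom Tokenization = 2
)

var tokenizationName = map[int32]string{1: "HTML", 2: "ATOM"}

// Enum mirrors the generated value-to-pointer helper above.
func (x Tokenization) Enum() *Tokenization {
	p := new(Tokenization)
	*p = x
	return p
}

func (x Tokenization) String() string { return tokenizationName[int32(x)] }

func main() {
	t := Atom.Enum() // pointer suitable for a proto2 optional field
	fmt.Println(int32(*t), t.String()) // 2 ATOM
}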
+var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} +func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + "GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} +func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} +func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x 
*CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} +func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} + +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} +func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} +func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} +func (Query_Hint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} +func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} +} + 
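[Editor's note] The same boilerplate repeats for every nested enum in this file; only the lookup maps and the EnumDescriptor indices change (the trailing []int encodes the nesting path, e.g. []int{15, 0, 0} above is top-level message 15 (Query), nested message 0 (Filter), enum 0 (Operator)). As a hedged sketch, under the same importability assumption as the earlier example, this is how a caller would populate such an enum field on the Query_Filter message defined later in this file, which is what the Enum() helper exists for:

	// Query_Filter.Op is declared as *Query_Filter_Operator (a proto2
	// required field), so it is set through the pointer Enum() returns.
	f := &datastore.Query_Filter{
		Op: datastore.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
	}
	fmt.Println(f.GetOp()) // prints "GREATER_THAN_OR_EQUAL"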
+type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} +func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + "CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} +func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0} +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = 
PutRequest_AutoIdPolicy(value) + return nil +} +func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} +} + +type BeginTransactionRequest_TransactionMode int32 + +const ( + BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 + BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 + BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 +) + +var BeginTransactionRequest_TransactionMode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READ_ONLY", + 2: "READ_WRITE", +} +var BeginTransactionRequest_TransactionMode_value = map[string]int32{ + "UNKNOWN": 0, + "READ_ONLY": 1, + "READ_WRITE": 2, +} + +func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { + p := new(BeginTransactionRequest_TransactionMode) + *p = x + return p +} +func (x BeginTransactionRequest_TransactionMode) String() string { + return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) +} +func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") + if err != nil { + return err + } + *x = BeginTransactionRequest_TransactionMode(value) + return nil +} +func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} +} + +type Action struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Action.Unmarshal(m, b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Action.Marshal(b, m, deterministic) +} +func (dst *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(dst, src) +} +func (m *Action) XXX_Size() int { + return xxx_messageInfo_Action.Size(m) +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return 
proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} +func (*PropertyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} +} +func (m *PropertyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue.Unmarshal(m, b) +} +func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue.Merge(dst, src) +} +func (m *PropertyValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue.Size(m) +} +func (m *PropertyValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue proto.InternalMessageInfo + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} +func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} +} +func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) +} +func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) +} +func (m *PropertyValue_PointValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_PointValue.Size(m) +} +func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string 
`protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} +func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} +} +func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) +} +func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) +} +func (m *PropertyValue_UserValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_UserValue.Size(m) +} +func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} +func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} +} +func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) +} +func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) +} +func (m *PropertyValue_ReferenceValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) +} +func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} +func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue 
`protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} +func (*Property) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} +} +func (m *Property) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Property.Unmarshal(m, b) +} +func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Property.Marshal(b, m, deterministic) +} +func (dst *Property) XXX_Merge(src proto.Message) { + xxx_messageInfo_Property.Merge(dst, src) +} +func (m *Property) XXX_Size() int { + return xxx_messageInfo_Property.Size(m) +} +func (m *Property) XXX_DiscardUnknown() { + xxx_messageInfo_Property.DiscardUnknown(m) +} + +var xxx_messageInfo_Property proto.InternalMessageInfo + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + +func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path.Unmarshal(m, b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) +} +func (dst *Path) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Path.Merge(dst, src) +} +func (m *Path) XXX_Size() int { + return xxx_messageInfo_Path.Size(m) +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} +func (*Path_Element) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} +} +func (m *Path_Element) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path_Element.Unmarshal(m, b) +} +func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) +} +func (dst *Path_Element) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path_Element.Merge(dst, src) +} +func (m *Path_Element) XXX_Size() int { + return xxx_messageInfo_Path_Element.Size(m) +} +func (m *Path_Element) XXX_DiscardUnknown() { + xxx_messageInfo_Path_Element.DiscardUnknown(m) +} + +var xxx_messageInfo_Path_Element proto.InternalMessageInfo + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} +func (*Reference) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} +} +func (m *Reference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reference.Unmarshal(m, b) +} +func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reference.Marshal(b, m, deterministic) +} +func (dst *Reference) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reference.Merge(dst, src) +} +func (m *Reference) XXX_Size() int { + return xxx_messageInfo_Reference.Size(m) +} +func (m *Reference) XXX_DiscardUnknown() { + xxx_messageInfo_Reference.DiscardUnknown(m) +} + +var xxx_messageInfo_Reference proto.InternalMessageInfo + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return 
m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_User.Unmarshal(m, b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_User.Marshal(b, m, deterministic) +} +func (dst *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(dst, src) +} +func (m *User) XXX_Size() int { + return xxx_messageInfo_User.Size(m) +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EntityProto) Reset() { *m = EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} +func (*EntityProto) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} +} +func (m *EntityProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EntityProto.Unmarshal(m, b) +} +func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) +} +func (dst *EntityProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityProto.Merge(dst, src) +} +func (m *EntityProto) XXX_Size() int { + return xxx_messageInfo_EntityProto.Size(m) +} +func (m *EntityProto) XXX_DiscardUnknown() { + xxx_messageInfo_EntityProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityProto proto.InternalMessageInfo + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} +func (*CompositeProperty) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} +} +func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) +} +func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) +} +func (dst *CompositeProperty) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeProperty.Merge(dst, src) +} +func (m *CompositeProperty) XXX_Size() int { + return xxx_messageInfo_CompositeProperty.Size(m) +} +func (m *CompositeProperty) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeProperty.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} +func (*Index) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} +} +func (m *Index) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Index.Unmarshal(m, b) +} +func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Index.Marshal(b, m, deterministic) +} +func (dst *Index) XXX_Merge(src proto.Message) { + xxx_messageInfo_Index.Merge(dst, src) +} +func (m *Index) XXX_Size() int { + return xxx_messageInfo_Index.Size(m) +} +func (m *Index) XXX_DiscardUnknown() { + xxx_messageInfo_Index.DiscardUnknown(m) +} + +var xxx_messageInfo_Index proto.InternalMessageInfo + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} +func (*Index_Property) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} +} +func (m *Index_Property) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Index_Property.Unmarshal(m, b) +} +func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) +} +func (dst *Index_Property) XXX_Merge(src proto.Message) { + xxx_messageInfo_Index_Property.Merge(dst, src) +} +func (m *Index_Property) XXX_Size() int { + return xxx_messageInfo_Index_Property.Size(m) +} +func (m *Index_Property) XXX_DiscardUnknown() { + xxx_messageInfo_Index_Property.DiscardUnknown(m) +} + +var xxx_messageInfo_Index_Property proto.InternalMessageInfo + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} +func (*CompositeIndex) 
Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} +} +func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) +} +func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) +} +func (dst *CompositeIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeIndex.Merge(dst, src) +} +func (m *CompositeIndex) XXX_Size() int { + return xxx_messageInfo_CompositeIndex.Size(m) +} +func (m *CompositeIndex) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} +func (*IndexPostfix) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} +} +func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) +} +func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) +} +func (dst *IndexPostfix) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPostfix.Merge(dst, src) +} +func (m *IndexPostfix) XXX_Size() int { + return xxx_messageInfo_IndexPostfix.Size(m) +} +func (m *IndexPostfix) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPostfix.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} +func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0} +} +func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b) +} +func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic) +} +func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src) +} +func (m *IndexPostfix_IndexValue) XXX_Size() int { + return xxx_messageInfo_IndexPostfix_IndexValue.Size(m) +} +func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} +func (*IndexPosition) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11} +} +func (m *IndexPosition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPosition.Unmarshal(m, b) +} +func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic) +} +func (dst *IndexPosition) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPosition.Merge(dst, src) +} +func (m *IndexPosition) XXX_Size() int { + return xxx_messageInfo_IndexPosition.Size(m) +} +func (m *IndexPosition) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPosition.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPosition proto.InternalMessageInfo + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} +func (*InternalHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13} +} +func (m *InternalHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InternalHeader.Unmarshal(m, b) +} +func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic) +} +func (dst *InternalHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalHeader.Merge(dst, src) +} +func (m *InternalHeader) XXX_Size() int { + return xxx_messageInfo_InternalHeader.Size(m) +} +func (m *InternalHeader) XXX_DiscardUnknown() { + xxx_messageInfo_InternalHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalHeader proto.InternalMessageInfo + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} +func (*Transaction) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14} +} +func (m *Transaction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Transaction.Unmarshal(m, b) +} +func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) +} +func (dst *Transaction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Transaction.Merge(dst, src) +} +func (m *Transaction) XXX_Size() int { + return xxx_messageInfo_Transaction.Size(m) +} +func (m *Transaction) XXX_DiscardUnknown() { + xxx_messageInfo_Transaction.DiscardUnknown(m) +} + +var xxx_messageInfo_Transaction proto.InternalMessageInfo + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + 
return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"` + SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query.Unmarshal(m, b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) +} +func (dst *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(dst, src) +} +func (m *Query) XXX_Size() int { + return xxx_messageInfo_Query.Size(m) +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + 
return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} +func (*Query_Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} +} +func (m *Query_Filter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query_Filter.Unmarshal(m, b) +} +func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic) +} +func (dst *Query_Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query_Filter.Merge(dst, src) +} +func (m *Query_Filter) XXX_Size() int { + return xxx_messageInfo_Query_Filter.Size(m) +} +func (m *Query_Filter) XXX_DiscardUnknown() { + xxx_messageInfo_Query_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_Query_Filter proto.InternalMessageInfo + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} +func (*Query_Order) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1} +} +func (m *Query_Order) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query_Order.Unmarshal(m, b) +} +func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic) +} +func (dst *Query_Order) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query_Order.Merge(dst, src) +} +func (m *Query_Order) XXX_Size() int { + return xxx_messageInfo_Query_Order.Size(m) +} +func (m *Query_Order) XXX_DiscardUnknown() { + xxx_messageInfo_Query_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Query_Order proto.InternalMessageInfo + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && m.Property != nil { + return 
*m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} +func (*CompiledQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16} +} +func (m *CompiledQuery) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery.Unmarshal(m, b) +} +func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery.Merge(dst, src) +} +func (m *CompiledQuery) XXX_Size() int { + return xxx_messageInfo_CompiledQuery.Size(m) +} +func (m *CompiledQuery) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type 
CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} +func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0} +} +func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b) +} +func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src) +} +func (m *CompiledQuery_PrimaryScan) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m) +} +func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string 
`protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} +func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1} +} +func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b) +} +func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src) +} +func (m *CompiledQuery_MergeJoinScan) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m) +} +func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} +func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2} +} +func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b) +} +func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src) +} +func (m *CompiledQuery_EntityFilter) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m) +} +func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() { + 
xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} +func (*CompiledCursor) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17} +} +func (m *CompiledCursor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor.Unmarshal(m, b) +} +func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor.Merge(dst, src) +} +func (m *CompiledCursor) XXX_Size() int { + return xxx_messageInfo_CompiledCursor.Size(m) +} +func (m *CompiledCursor) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledCursor.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} +func (*CompiledCursor_Position) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0} +} +func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b) +} +func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor_Position.Merge(dst, src) +} +func (m *CompiledCursor_Position) XXX_Size() int { + return xxx_messageInfo_CompiledCursor_Position.Size(m) +} +func (m *CompiledCursor_Position) XXX_DiscardUnknown() { + 
xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} +func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0} +} +func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b) +} +func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src) +} +func (m *CompiledCursor_Position_IndexValue) XXX_Size() int { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m) +} +func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} +func (*Cursor) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18} +} +func (m *Cursor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cursor.Unmarshal(m, b) +} +func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cursor.Marshal(b, m, deterministic) +} +func (dst *Cursor) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Cursor.Merge(dst, src) +} +func (m *Cursor) XXX_Size() int { + return xxx_messageInfo_Cursor.Size(m) +} +func (m *Cursor) XXX_DiscardUnknown() { + xxx_messageInfo_Cursor.DiscardUnknown(m) +} + +var xxx_messageInfo_Cursor proto.InternalMessageInfo + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (dst *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(dst, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) ProtoMessage() {} +func (*Cost) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20} +} +func (m *Cost) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cost.Unmarshal(m, b) +} +func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cost.Marshal(b, m, deterministic) +} +func (dst *Cost) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cost.Merge(dst, src) +} +func (m *Cost) XXX_Size() int { + return xxx_messageInfo_Cost.Size(m) +} +func (m *Cost) XXX_DiscardUnknown() { + xxx_messageInfo_Cost.DiscardUnknown(m) +} + +var xxx_messageInfo_Cost proto.InternalMessageInfo + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) 
GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} +func (*Cost_CommitCost) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0} +} +func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b) +} +func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic) +} +func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cost_CommitCost.Merge(dst, src) +} +func (m *Cost_CommitCost) XXX_Size() int { + return xxx_messageInfo_Cost_CommitCost.Size(m) +} +func (m *Cost_CommitCost) XXX_DiscardUnknown() { + xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m) +} + +var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21} +} +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (dst *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(dst, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22} +} +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (dst *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(dst, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return 
proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} +func (*GetResponse_Entity) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0} +} +func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b) +} +func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic) +} +func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse_Entity.Merge(dst, src) +} +func (m *GetResponse_Entity) XXX_Size() int { + return xxx_messageInfo_GetResponse_Entity.Size(m) +} +func (m *GetResponse_Entity) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23} +} +func (m *PutRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutRequest.Unmarshal(m, b) +} +func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) +} +func (dst *PutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutRequest.Merge(dst, src) +} +func (m *PutRequest) XXX_Size() int { + return xxx_messageInfo_PutRequest.Size(m) +} +func (m *PutRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutRequest proto.InternalMessageInfo + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + 
return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24} +} +func (m *PutResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutResponse.Unmarshal(m, b) +} +func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) +} +func (dst *PutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutResponse.Merge(dst, src) +} +func (m *PutResponse) XXX_Size() int { + return xxx_messageInfo_PutResponse.Size(m) +} +func (m *PutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutResponse proto.InternalMessageInfo + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} +func (*TouchRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25} +} +func (m *TouchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TouchRequest.Unmarshal(m, b) +} +func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic) +} +func (dst *TouchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TouchRequest.Merge(dst, src) +} +func (m *TouchRequest) XXX_Size() int { + return xxx_messageInfo_TouchRequest.Size(m) +} +func (m *TouchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TouchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TouchRequest proto.InternalMessageInfo + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} +func (*TouchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26} +} +func (m *TouchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TouchResponse.Unmarshal(m, b) +} +func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic) +} +func (dst *TouchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TouchResponse.Merge(dst, src) +} +func (m *TouchResponse) XXX_Size() int { + return xxx_messageInfo_TouchResponse.Size(m) +} +func (m *TouchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TouchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TouchResponse proto.InternalMessageInfo + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) 
Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27} +} +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(dst, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28} +} +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(dst, src) +} +func (m *DeleteResponse) XXX_Size() int { + return xxx_messageInfo_DeleteResponse.Size(m) +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 
`protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} +func (*NextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29} +} +func (m *NextRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NextRequest.Unmarshal(m, b) +} +func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic) +} +func (dst *NextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextRequest.Merge(dst, src) +} +func (m *NextRequest) XXX_Size() int { + return xxx_messageInfo_NextRequest.Size(m) +} +func (m *NextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NextRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NextRequest proto.InternalMessageInfo + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryResult.Unmarshal(m, b) 
+} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) +} +func (dst *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(dst, src) +} +func (m *QueryResult) XXX_Size() int { + return xxx_messageInfo_QueryResult.Size(m) +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResult proto.InternalMessageInfo + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} +func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31} +} +func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b) +} +func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic) +} +func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocateIdsRequest.Merge(dst, src) +} +func (m *AllocateIdsRequest) XXX_Size() int { + return xxx_messageInfo_AllocateIdsRequest.Size(m) +} +func (m *AllocateIdsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m 
*AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} +func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32} +} +func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b) +} +func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic) +} +func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocateIdsResponse.Merge(dst, src) +} +func (m *AllocateIdsResponse) XXX_Size() int { + return xxx_messageInfo_AllocateIdsResponse.Size(m) +} +func (m *AllocateIdsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} +func (*CompositeIndices) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33} +} +func (m *CompositeIndices) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeIndices.Unmarshal(m, b) +} +func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic) +} +func (dst *CompositeIndices) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeIndices.Merge(dst, src) +} +func (m *CompositeIndices) XXX_Size() int { + return xxx_messageInfo_CompositeIndices.Size(m) +} +func (m *CompositeIndices) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeIndices.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest 
struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} +func (*AddActionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34} +} +func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b) +} +func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic) +} +func (dst *AddActionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddActionsRequest.Merge(dst, src) +} +func (m *AddActionsRequest) XXX_Size() int { + return xxx_messageInfo_AddActionsRequest.Size(m) +} +func (m *AddActionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddActionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} +func (*AddActionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35} +} +func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b) +} +func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic) +} +func (dst *AddActionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddActionsResponse.Merge(dst, src) +} +func (m *AddActionsResponse) XXX_Size() int { + return xxx_messageInfo_AddActionsResponse.Size(m) +} +func (m *AddActionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo + +type BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` + DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` + Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` + PreviousTransaction *Transaction 
`protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} +} +func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) +} +func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) +} +func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) +} +func (m *BeginTransactionRequest) XXX_Size() int { + return xxx_messageInfo_BeginTransactionRequest.Size(m) +} +func (m *BeginTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false +const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +func (m *BeginTransactionRequest) GetDatabaseId() string { + if m != nil && m.DatabaseId != nil { + return *m.DatabaseId + } + return "" +} + +func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_BeginTransactionRequest_Mode +} + +func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { + if m != nil { + return m.PreviousTransaction + } + return nil +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} +} +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitResponse.Unmarshal(m, b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) +} +func (dst *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(dst, src) +} +func (m *CommitResponse) XXX_Size() int { + return xxx_messageInfo_CommitResponse.Size(m) 
+} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} +func (*CommitResponse_Version) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} +} +func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) +} +func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) +} +func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse_Version.Merge(dst, src) +} +func (m *CommitResponse_Version) XXX_Size() int { + return xxx_messageInfo_CommitResponse_Version.Size(m) +} +func (m *CommitResponse_Version) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { + proto.RegisterType((*Action)(nil), "appengine.Action") + proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") + proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") + proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") + proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") + proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") + proto.RegisterType((*Property)(nil), "appengine.Property") + proto.RegisterType((*Path)(nil), "appengine.Path") + proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") + proto.RegisterType((*Reference)(nil), "appengine.Reference") + proto.RegisterType((*User)(nil), "appengine.User") + proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") + proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") + proto.RegisterType((*Index)(nil), "appengine.Index") + proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") + proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") + proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") + proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") + proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") + 
proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") + proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") + proto.RegisterType((*Transaction)(nil), "appengine.Transaction") + proto.RegisterType((*Query)(nil), "appengine.Query") + proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") + proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") + proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") + proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") + proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") + proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") + proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") + proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") + proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") + proto.RegisterType((*Cursor)(nil), "appengine.Cursor") + proto.RegisterType((*Error)(nil), "appengine.Error") + proto.RegisterType((*Cost)(nil), "appengine.Cost") + proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") + proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") + proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") + proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") + proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") + proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") + proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") + proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") + proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") + proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") + proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") + proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") + proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") + proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") + proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") + proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") + proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") + proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") + proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) +} + +var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ + // 4156 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, + 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, + 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, + 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, + 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, + 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, + 0x39, 
0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, + 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, + 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, + 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, + 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, + 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, + 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, + 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, + 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, + 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, + 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, + 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, + 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, + 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, + 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, + 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, + 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, + 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, + 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, + 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, + 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, + 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, + 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, + 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, + 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, + 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, + 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, + 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50, + 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, + 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, + 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, + 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, + 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, + 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, + 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, + 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, + 0x2b, 0x9b, 0x07, 0x12, 0xcb, 
0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, + 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, + 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, + 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, + 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, + 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, + 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, + 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, + 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, + 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, + 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9, + 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, + 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, + 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, + 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, + 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, + 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, + 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, + 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, + 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, + 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, + 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, + 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, + 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, + 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, + 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, + 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, + 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0, + 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, + 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, + 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, + 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, + 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, + 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, + 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, + 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, + 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 
0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, + 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, + 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, + 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, + 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, + 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, + 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, + 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, + 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, + 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, + 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, + 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, + 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, + 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, + 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, + 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, + 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, + 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, + 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, + 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, + 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, + 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, + 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, + 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, + 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, + 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, + 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, + 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, + 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, + 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, + 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, + 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, + 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, + 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, + 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, + 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, + 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 
0x63, 0xd7, 0x8b, + 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, + 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, + 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, + 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, + 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, + 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, + 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, + 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, + 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, + 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, + 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, + 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, + 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, + 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, + 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, + 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, + 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, + 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, + 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, + 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, + 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, + 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, + 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, + 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, + 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, + 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, + 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, + 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, + 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, + 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, + 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, + 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, + 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, + 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, + 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, + 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, + 0x3d, 
0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, + 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, + 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, + 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, + 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, + 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, + 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, + 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, + 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, + 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, + 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, + 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, + 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, + 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, + 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, + 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, + 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, + 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, + 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, + 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, + 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, + 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, + 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, + 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, + 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, + 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, + 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, + 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99, + 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, + 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, + 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, + 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, + 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, + 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, + 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, + 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, + 0xcd, 0xd4, 0xad, 0xdd, 0xee, 
0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, + 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, + 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, + 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, + 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, + 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, + 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, + 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, + 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, + 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, + 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c, + 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, + 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, + 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, + 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, + 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, + 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, + 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, + 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, + 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, + 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, + 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, + 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, + 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, + 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, + 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, + 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, + 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96, + 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, + 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, + 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, + 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, + 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, + 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, + 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, + 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, + 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 
0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, + 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, + 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, + 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, + 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, + 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, + 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, + 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, + 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, + 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, + 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, + 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, + 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, + 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, + 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, + 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, + 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, + 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, + 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, + 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, + 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, + 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, + 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, + 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, + 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, + 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, + 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, + 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, + 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, + 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, + 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, + 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, + 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, + 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, + 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, + 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, +} diff 
--git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 000000000..d538701ab --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,14 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import netcontext "golang.org/x/net/context" + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +func AppID(c netcontext.Context) string { + return appID(FullyQualifiedAppID(c)) +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go new file mode 100644 index 000000000..b59603f13 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -0,0 +1,57 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine" + + netcontext "golang.org/x/net/context" +) + +func DefaultVersionHostname(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.DefaultVersionHostname(c) +} + +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } + +func RequestID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.RequestID(c) +} + +func ModuleName(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.ModuleName(c) +} +func VersionID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.VersionID(c) +} + +func fullyQualifiedAppID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return c.FullyQualifiedAppID() +} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 000000000..5d8067263 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,134 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "log" + "net/http" + "os" + "strings" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. 
+ +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + c := fromContext(ctx) + if c == nil { + return nil + } + return c.Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { + return dc + } + // If the header isn't set, read zone from the metadata service. + // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] + zone, err := getMetadata("instance/zone") + if err != nil { + log.Printf("Datacenter: %v", err) + return "" + } + parts := strings.Split(string(zone), "/") + if len(parts) == 0 { + return "" + } + return parts[len(parts)-1] +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + if s := os.Getenv("GAE_ENV"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + if s := os.Getenv("GAE_SERVICE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + if s := os.Getenv("GAE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. + if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { + return appID + } + if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { + return project + } + return string(mustGetMetadata("instance/attributes/gae_project")) +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + if s := os.Getenv("GAE_APPLICATION"); s != "" { + return s + } + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 000000000..051ea3980 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,110 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. 
+// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. +var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError <empty>" + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code. + Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 000000000..8545ac4ad --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,1313 @@ +// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/log/log_service.proto + +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} +func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} +} + +type LogServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} +func (*LogServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{0} +} +func (m *LogServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogServiceError.Unmarshal(m, b) +} +func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) +} +func (dst *LogServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogServiceError.Merge(dst, src) +} +func (m *LogServiceError) XXX_Size() int { + return xxx_messageInfo_LogServiceError.Size(m) +} +func (m *LogServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_LogServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_LogServiceError proto.InternalMessageInfo + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} +func (*UserAppLogLine) Descriptor() ([]byte, []int) { + 
return fileDescriptor_log_service_f054fd4b5012319d, []int{1} +} +func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) +} +func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) +} +func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAppLogLine.Merge(dst, src) +} +func (m *UserAppLogLine) XXX_Size() int { + return xxx_messageInfo_UserAppLogLine.Size(m) +} +func (m *UserAppLogLine) XXX_DiscardUnknown() { + xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} +func (*UserAppLogGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{2} +} +func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) +} +func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) +} +func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAppLogGroup.Merge(dst, src) +} +func (m *UserAppLogGroup) XXX_Size() int { + return xxx_messageInfo_UserAppLogGroup.Size(m) +} +func (m *UserAppLogGroup) XXX_DiscardUnknown() { + xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} +func (*FlushRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{3} +} +func (m *FlushRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FlushRequest.Unmarshal(m, b) +} +func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) +} +func (dst *FlushRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushRequest.Merge(dst, src) +} +func (m *FlushRequest) XXX_Size() int { + return xxx_messageInfo_FlushRequest.Size(m) +} +func (m *FlushRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FlushRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FlushRequest 
proto.InternalMessageInfo + +func (m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} +func (*SetStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{4} +} +func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) +} +func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) +} +func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetStatusRequest.Merge(dst, src) +} +func (m *SetStatusRequest) XXX_Size() int { + return xxx_messageInfo_SetStatusRequest.Size(m) +} +func (m *SetStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} +func (*LogOffset) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{5} +} +func (m *LogOffset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogOffset.Unmarshal(m, b) +} +func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) +} +func (dst *LogOffset) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogOffset.Merge(dst, src) +} +func (m *LogOffset) XXX_Size() int { + return xxx_messageInfo_LogOffset.Size(m) +} +func (m *LogOffset) XXX_DiscardUnknown() { + xxx_messageInfo_LogOffset.DiscardUnknown(m) +} + +var xxx_messageInfo_LogOffset proto.InternalMessageInfo + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} +func (*LogLine) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{6} +} +func (m *LogLine) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogLine.Unmarshal(m, b) +} +func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_LogLine.Marshal(b, m, deterministic) +} +func (dst *LogLine) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogLine.Merge(dst, src) +} +func (m *LogLine) XXX_Size() int { + return xxx_messageInfo_LogLine.Size(m) +} +func (m *LogLine) XXX_DiscardUnknown() { + xxx_messageInfo_LogLine.DiscardUnknown(m) +} + +var xxx_messageInfo_LogLine proto.InternalMessageInfo + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` + PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" 
json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} +func (*RequestLog) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{7} +} +func (m *RequestLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestLog.Unmarshal(m, b) +} +func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) +} +func (dst *RequestLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestLog.Merge(dst, src) +} +func (m *RequestLog) XXX_Size() int { + return xxx_messageInfo_RequestLog.Size(m) +} +func (m *RequestLog) XXX_DiscardUnknown() { + xxx_messageInfo_RequestLog.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestLog proto.InternalMessageInfo + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return 
*m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil && m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type 
LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} +func (*LogModuleVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{8} +} +func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) +} +func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) +} +func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogModuleVersion.Merge(dst, src) +} +func (m *LogModuleVersion) XXX_Size() int { + return xxx_messageInfo_LogModuleVersion.Size(m) +} +func (m *LogModuleVersion) XXX_DiscardUnknown() { + xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` + IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} +func (*LogReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{9} +} +func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) +} +func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) +} +func (dst *LogReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogReadRequest.Merge(dst, src) +} +func (m *LogReadRequest) XXX_Size() int { + return xxx_messageInfo_LogReadRequest.Size(m) +} +func (m *LogReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LogReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return "" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != 
nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} +func (*LogReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{10} +} +func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) +} +func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) +} +func (dst *LogReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogReadResponse.Merge(dst, src) +} +func (m *LogReadResponse) XXX_Size() int { + return xxx_messageInfo_LogReadResponse.Size(m) +} +func (m *LogReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LogReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} +func (*LogUsageRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{11} +} +func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) +} +func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) +} +func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageRecord.Merge(dst, src) +} +func (m *LogUsageRecord) XXX_Size() int { + return 
xxx_messageInfo_LogUsageRecord.Size(m) +} +func (m *LogUsageRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} +func (*LogUsageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{12} +} +func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) +} +func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) +} +func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageRequest.Merge(dst, src) +} +func (m *LogUsageRequest) XXX_Size() int { + return xxx_messageInfo_LogUsageRequest.Size(m) +} +func (m *LogUsageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) 
GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} +func (*LogUsageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{13} +} +func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) +} +func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) +} +func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageResponse.Merge(dst, src) +} +func (m *LogUsageResponse) XXX_Size() int { + return xxx_messageInfo_LogUsageResponse.Size(m) +} +func (m *LogUsageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { + proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") + proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") + proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") + proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") + proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") + proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") + proto.RegisterType((*LogLine)(nil), "appengine.LogLine") + proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") + proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") + proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") + proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") + proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") + proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") + proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) +} + +var fileDescriptor_log_service_f054fd4b5012319d = []byte{ + // 1553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, + 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 
0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, + 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, + 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, + 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, + 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, + 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, + 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, + 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, + 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, + 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, + 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, + 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, + 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, + 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, + 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, + 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, + 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, + 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, + 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, + 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, + 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, + 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, + 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, + 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, + 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, + 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, + 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, + 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, + 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, + 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, + 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, + 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, + 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, + 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, + 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, + 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, + 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 
0x85, 0x26, 0xd4, 0x60, 0xbf, + 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, + 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, + 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, + 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, + 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, + 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, + 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, + 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, + 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, + 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, + 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, + 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, + 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, + 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, + 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, + 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, + 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, + 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, + 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, + 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, + 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, + 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, + 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, + 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, + 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, + 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, + 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, + 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, + 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, + 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, + 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, + 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, + 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, + 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, + 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, + 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 
0x83, + 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, + 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, + 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, + 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, + 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, + 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d, + 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, + 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, + 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, + 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, + 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, + 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, + 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, + 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, + 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, + 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, + 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, + 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, + 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, + 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, + 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, + 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, + 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go new file mode 100644 index 000000000..49036163c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -0,0 +1,15 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine_internal" +) + +func Main() { + appengine_internal.Main() +} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go new file mode 100644 index 000000000..822e784a4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -0,0 +1,48 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build !appengine + +package internal + +import ( + "io" + "log" + "net/http" + "net/url" + "os" +) + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + host := "" + if IsDevAppServer() { + host = "127.0.0.1" + } + if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 000000000..c4ba63bb4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 000000000..ddfc0c04a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,786 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/modules/modules_service.proto + +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} +func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} +} + +type ModulesServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} +func (*ModulesServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} +} +func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) +} +func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) +} +func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModulesServiceError.Merge(dst, src) +} +func (m *ModulesServiceError) XXX_Size() int { + return xxx_messageInfo_ModulesServiceError.Size(m) +} +func (m *ModulesServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo + +type GetModulesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} +func (*GetModulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} +} +func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) +} +func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) +} +func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesRequest.Merge(dst, src) +} +func (m *GetModulesRequest) XXX_Size() int { + return xxx_messageInfo_GetModulesRequest.Size(m) +} +func (m *GetModulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} +func (*GetModulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} +} +func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) +} +func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) +} +func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesResponse.Merge(dst, src) +} +func (m *GetModulesResponse) XXX_Size() int { + return xxx_messageInfo_GetModulesResponse.Size(m) +} +func (m *GetModulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionsRequest) ProtoMessage() {} +func (*GetVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} +} +func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) +} +func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsRequest.Merge(dst, src) +} +func (m *GetVersionsRequest) XXX_Size() int { + return xxx_messageInfo_GetVersionsRequest.Size(m) +} +func (m *GetVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { 
return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} +func (*GetVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} +} +func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) +} +func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) +} +func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsResponse.Merge(dst, src) +} +func (m *GetVersionsResponse) XXX_Size() int { + return xxx_messageInfo_GetVersionsResponse.Size(m) +} +func (m *GetVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} +func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} +} +func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) +} +func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) +} +func (m *GetDefaultVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionRequest.Size(m) +} +func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} +func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} +} +func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) +} +func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) +} +func (m *GetDefaultVersionResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionResponse.Size(m) +} +func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} +func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} +} +func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) +} +func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) +} +func (m *GetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesRequest.Size(m) +} +func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} +func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} +} +func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) +} +func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) +} +func (m *GetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesResponse.Size(m) +} +func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo + +func (m 
*GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} +func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} +} +func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) +} +func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) +} +func (m *SetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesRequest.Size(m) +} +func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} +func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} +} +func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) +} +func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) +} +func (m *SetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesResponse.Size(m) +} +func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} +func (*StartModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} +} +func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) +} +func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleRequest.Merge(dst, src) +} +func (m *StartModuleRequest) XXX_Size() int { + return xxx_messageInfo_StartModuleRequest.Size(m) +} +func (m *StartModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} +func (*StartModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} +} +func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) +} +func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleResponse.Merge(dst, src) +} +func (m *StartModuleResponse) XXX_Size() int { + return xxx_messageInfo_StartModuleResponse.Size(m) +} +func (m *StartModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} +func (*StopModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} +} +func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) +} +func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleRequest.Merge(dst, src) +} +func (m 
*StopModuleRequest) XXX_Size() int { + return xxx_messageInfo_StopModuleRequest.Size(m) +} +func (m *StopModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} +func (*StopModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} +} +func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) +} +func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleResponse.Merge(dst, src) +} +func (m *StopModuleResponse) XXX_Size() int { + return xxx_messageInfo_StopModuleResponse.Size(m) +} +func (m *StopModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} +func (*GetHostnameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} +} +func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) +} +func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) +} +func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameRequest.Merge(dst, src) +} +func (m *GetHostnameRequest) XXX_Size() int { + return xxx_messageInfo_GetHostnameRequest.Size(m) +} +func (m *GetHostnameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string 
`protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} +func (*GetHostnameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} +} +func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) +} +func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) +} +func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameResponse.Merge(dst, src) +} +func (m *GetHostnameResponse) XXX_Size() int { + return xxx_messageInfo_GetHostnameResponse.Size(m) +} +func (m *GetHostnameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { + proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") + proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") + proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") + proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") + proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") + proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest") + proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") + proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") + proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") + proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") + proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") + proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") + proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") + proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") + proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") + proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") + proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) +} + +var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, + 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, + 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, + 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, + 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 
0xac, 0xc0, 0x59, 0x2e, + 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, + 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, + 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, + 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, + 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, + 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, + 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, + 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, + 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, + 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, + 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, + 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, + 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, + 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, + 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, + 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, + 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, + 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, + 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, + 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, + 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, + 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, + 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 000000000..3b94cf0c6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
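+ // NOTE: a slot in limitSem was acquired above; every failure path
+ // below must call limitRelease() to return it, while a successful
+ // dial hands the slot to the returned limitConn, which releases it
+ // in Close (backed by a finalizer in case the caller never closes).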
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 000000000..8d782a38e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto + +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} +func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} +} + +type Request struct { + ServiceName *string 
`protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} +func (*ApplicationError) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{1} +} +func (m *ApplicationError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplicationError.Unmarshal(m, b) +} +func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) +} +func (dst *ApplicationError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplicationError.Merge(dst, src) +} +func (m *ApplicationError) XXX_Size() int { + return xxx_messageInfo_ApplicationError.Size(m) +} +func (m *ApplicationError) XXX_DiscardUnknown() { + xxx_messageInfo_ApplicationError.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplicationError proto.InternalMessageInfo + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} +func (*RpcError) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{2} +} +func (m *RpcError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RpcError.Unmarshal(m, b) +} +func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) +} +func (dst *RpcError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcError.Merge(dst, src) +} +func (m *RpcError) XXX_Size() int { + return xxx_messageInfo_RpcError.Size(m) +} +func (m *RpcError) XXX_DiscardUnknown() { + xxx_messageInfo_RpcError.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcError proto.InternalMessageInfo + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{3} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { + proto.RegisterType((*Request)(nil), "remote_api.Request") + proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") + proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") + proto.RegisterType((*Response)(nil), "remote_api.Response") +} + +func init() { + 
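+ // RegisterFile records the gzipped FileDescriptorProto below under the
+ // .proto file's import path, so Descriptor() and reflection-based
+ // tooling can look it up at runtime.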
proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) +} + +var fileDescriptor_remote_api_1978114ec33a273d = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42, + 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, + 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, + 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, + 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, + 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, + 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, + 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, + 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, + 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, + 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, + 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, + 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, + 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, + 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, + 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, + 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, + 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, + 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, + 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, + 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, + 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, + 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, + 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, + 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, + 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, + 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, + 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, + 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, + 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, + 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, + 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, + 0x03, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 000000000..4ec872e46 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,2822 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/socket/socket_service.proto + +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} +func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + 
RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + 
RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT 
RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: 
"SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: "SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + 
"SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + "SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} +func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} +func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} +func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} +func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE 
SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, 
int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} +func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} +func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} +func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", 
+ 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} +func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, 
"ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} +func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} +func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} +} +func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) +} +func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) +} +func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) +} +func (m *RemoteSocketServiceError) XXX_Size() int { + return xxx_messageInfo_RemoteSocketServiceError.Size(m) +} +func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} +func (*AddressPort) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} +} +func (m *AddressPort) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddressPort.Unmarshal(m, b) +} +func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) +} +func (dst *AddressPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressPort.Merge(dst, src) +} +func (m *AddressPort) XXX_Size() int { + return xxx_messageInfo_AddressPort.Size(m) +} +func (m *AddressPort) XXX_DiscardUnknown() { + xxx_messageInfo_AddressPort.DiscardUnknown(m) +} + +var xxx_messageInfo_AddressPort proto.InternalMessageInfo + +func (m *AddressPort) GetPort() int32 
{ + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} +func (*CreateSocketRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} +} +func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) +} +func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) +} +func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketRequest.Merge(dst, src) +} +func (m *CreateSocketRequest) XXX_Size() int { + return xxx_messageInfo_CreateSocketRequest.Size(m) +} +func (m *CreateSocketRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if 
m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} +func (*CreateSocketReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} +} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) +} +func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) +} +func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSocketReply.Merge(dst, src) +} +func (m *CreateSocketReply) XXX_Size() int { + return xxx_messageInfo_CreateSocketReply.Size(m) +} +func (m *CreateSocketReply) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} +func (*BindRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} +} +func (m *BindRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindRequest.Unmarshal(m, b) +} +func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) +} +func (dst *BindRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindRequest.Merge(dst, src) +} +func (m *BindRequest) XXX_Size() int { + return 
xxx_messageInfo_BindRequest.Size(m) +} +func (m *BindRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BindRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BindRequest proto.InternalMessageInfo + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} +func (*BindReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} +} +func (m *BindReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BindReply.Unmarshal(m, b) +} +func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) +} +func (dst *BindReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_BindReply.Merge(dst, src) +} +func (m *BindReply) XXX_Size() int { + return xxx_messageInfo_BindReply.Size(m) +} +func (m *BindReply) XXX_DiscardUnknown() { + xxx_messageInfo_BindReply.DiscardUnknown(m) +} + +var xxx_messageInfo_BindReply proto.InternalMessageInfo + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} +func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} +} +func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) +} +func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) +} +func (m *GetSocketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketNameRequest.Size(m) +} +func (m *GetSocketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} +func (*GetSocketNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} +} +func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) +} +func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketNameReply.Merge(dst, src) +} +func (m *GetSocketNameReply) XXX_Size() int { + return xxx_messageInfo_GetSocketNameReply.Size(m) +} +func (m *GetSocketNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} +func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} +} +func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) +} +func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) +} +func (m *GetPeerNameRequest) XXX_Size() int { + return xxx_messageInfo_GetPeerNameRequest.Size(m) +} +func (m *GetPeerNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo + +func (m *GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} +func (*GetPeerNameReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} +} +func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) +} +func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) +} +func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPeerNameReply.Merge(dst, 
src) +} +func (m *GetPeerNameReply) XXX_Size() int { + return xxx_messageInfo_GetPeerNameReply.Size(m) +} +func (m *GetPeerNameReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} +func (*SocketOption) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} +} +func (m *SocketOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SocketOption.Unmarshal(m, b) +} +func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) +} +func (dst *SocketOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_SocketOption.Merge(dst, src) +} +func (m *SocketOption) XXX_Size() int { + return xxx_messageInfo_SocketOption.Size(m) +} +func (m *SocketOption) XXX_DiscardUnknown() { + xxx_messageInfo_SocketOption.DiscardUnknown(m) +} + +var xxx_messageInfo_SocketOption proto.InternalMessageInfo + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} +func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} +} +func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) +} +func (m *SetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsRequest.Size(m) +} +func (m 
*SetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} +func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} +} +func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) +} +func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) +} +func (m *SetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_SetSocketOptionsReply.Size(m) +} +func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} +func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} +} +func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) +} +func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) +} +func (m *GetSocketOptionsRequest) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsRequest.Size(m) +} +func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + Options []*SocketOption 
`protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} +func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} +} +func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) +} +func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) +} +func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) +} +func (m *GetSocketOptionsReply) XXX_Size() int { + return xxx_messageInfo_GetSocketOptionsReply.Size(m) +} +func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { + xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} +func (*ConnectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} +} +func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) +} +func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) +} +func (dst *ConnectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectRequest.Merge(dst, src) +} +func (m *ConnectRequest) XXX_Size() int { + return xxx_messageInfo_ConnectRequest.Size(m) +} +func (m *ConnectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} +func (*ConnectReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} +} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectReply.Unmarshal(m, b) +} +func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) +} +func (dst *ConnectReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectReply.Merge(dst, src) +} +func (m *ConnectReply) XXX_Size() int { + return xxx_messageInfo_ConnectReply.Size(m) +} +func (m *ConnectReply) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectReply proto.InternalMessageInfo + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} +func (*ListenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} +} +func (m *ListenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenRequest.Unmarshal(m, b) +} +func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) +} +func (dst *ListenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenRequest.Merge(dst, src) +} +func (m *ListenRequest) XXX_Size() int { + return xxx_messageInfo_ListenRequest.Size(m) +} +func (m *ListenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenRequest proto.InternalMessageInfo + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} +func (*ListenReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} +} +func (m *ListenReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListenReply.Unmarshal(m, b) +} +func (m *ListenReply) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) +} +func (dst *ListenReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListenReply.Merge(dst, src) +} +func (m *ListenReply) XXX_Size() int { + return xxx_messageInfo_ListenReply.Size(m) +} +func (m *ListenReply) XXX_DiscardUnknown() { + xxx_messageInfo_ListenReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ListenReply proto.InternalMessageInfo + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} +func (*AcceptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} +} +func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) +} +func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) +} +func (dst *AcceptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptRequest.Merge(dst, src) +} +func (m *AcceptRequest) XXX_Size() int { + return xxx_messageInfo_AcceptRequest.Size(m) +} +func (m *AcceptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} +func (*AcceptReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} +} +func (m *AcceptReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AcceptReply.Unmarshal(m, b) +} +func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) +} +func (dst *AcceptReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_AcceptReply.Merge(dst, src) +} +func (m *AcceptReply) XXX_Size() int { + return xxx_messageInfo_AcceptReply.Size(m) +} +func (m *AcceptReply) XXX_DiscardUnknown() { + xxx_messageInfo_AcceptReply.DiscardUnknown(m) +} + +var xxx_messageInfo_AcceptReply proto.InternalMessageInfo + +func (m *AcceptReply) 
GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} +func (*ShutDownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} +} +func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) +} +func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) +} +func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownRequest.Merge(dst, src) +} +func (m *ShutDownRequest) XXX_Size() int { + return xxx_messageInfo_ShutDownRequest.Size(m) +} +func (m *ShutDownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} +func (*ShutDownReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} +} +func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) +} +func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) +} +func (dst *ShutDownReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutDownReply.Merge(dst, src) +} +func (m *ShutDownReply) XXX_Size() int { + return xxx_messageInfo_ShutDownReply.Size(m) +} +func (m *ShutDownReply) XXX_DiscardUnknown() { + xxx_messageInfo_ShutDownReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} +} +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (dst *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(dst, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} +func (*CloseReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} +} +func (m *CloseReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseReply.Unmarshal(m, b) +} +func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) +} +func (dst *CloseReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseReply.Merge(dst, src) +} +func (m *CloseReply) XXX_Size() int { + return xxx_messageInfo_CloseReply.Size(m) +} +func (m *CloseReply) XXX_DiscardUnknown() { + xxx_messageInfo_CloseReply.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseReply proto.InternalMessageInfo + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} +func (*SendRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} +} +func (m *SendRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_SendRequest.Unmarshal(m, b) +} +func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) +} +func (dst *SendRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendRequest.Merge(dst, src) +} +func (m *SendRequest) XXX_Size() int { + return xxx_messageInfo_SendRequest.Size(m) +} +func (m *SendRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendRequest proto.InternalMessageInfo + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} +func (*SendReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} +} +func (m *SendReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendReply.Unmarshal(m, b) +} +func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) +} +func (dst *SendReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendReply.Merge(dst, src) +} +func (m *SendReply) XXX_Size() int { + return xxx_messageInfo_SendReply.Size(m) +} +func (m *SendReply) XXX_DiscardUnknown() { + xxx_messageInfo_SendReply.DiscardUnknown(m) +} + +var xxx_messageInfo_SendReply proto.InternalMessageInfo + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} +func (*ReceiveRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_socket_service_b5f8f233dc327808, []int{27} +} +func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) +} +func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) +} +func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveRequest.Merge(dst, src) +} +func (m *ReceiveRequest) XXX_Size() int { + return xxx_messageInfo_ReceiveRequest.Size(m) +} +func (m *ReceiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} +func (*ReceiveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} +} +func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) +} +func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) +} +func (dst *ReceiveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReceiveReply.Merge(dst, src) +} +func (m *ReceiveReply) XXX_Size() int { + return xxx_messageInfo_ReceiveReply.Size(m) +} +func (m *ReceiveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ReceiveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` + 
RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} +func (*PollEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} +} +func (m *PollEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollEvent.Unmarshal(m, b) +} +func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) +} +func (dst *PollEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollEvent.Merge(dst, src) +} +func (m *PollEvent) XXX_Size() int { + return xxx_messageInfo_PollEvent.Size(m) +} +func (m *PollEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PollEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PollEvent proto.InternalMessageInfo + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} +func (*PollRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} +} +func (m *PollRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollRequest.Unmarshal(m, b) +} +func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) +} +func (dst *PollRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollRequest.Merge(dst, src) +} +func (m *PollRequest) XXX_Size() int { + return xxx_messageInfo_PollRequest.Size(m) +} +func (m *PollRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PollRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PollRequest proto.InternalMessageInfo + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} 
} +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} +func (*PollReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} +} +func (m *PollReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PollReply.Unmarshal(m, b) +} +func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) +} +func (dst *PollReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_PollReply.Merge(dst, src) +} +func (m *PollReply) XXX_Size() int { + return xxx_messageInfo_PollReply.Size(m) +} +func (m *PollReply) XXX_DiscardUnknown() { + xxx_messageInfo_PollReply.DiscardUnknown(m) +} + +var xxx_messageInfo_PollReply proto.InternalMessageInfo + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} +func (*ResolveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} +} +func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) +} +func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) +} +func (dst *ResolveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveRequest.Merge(dst, src) +} +func (m *ResolveRequest) XXX_Size() int { + return xxx_messageInfo_ResolveRequest.Size(m) +} +func (m *ResolveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} +func (*ResolveReply) Descriptor() ([]byte, []int) { + return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} +} +func (m *ResolveReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResolveReply.Unmarshal(m, b) +} +func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) +} +func (dst *ResolveReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveReply.Merge(dst, src) +} +func (m *ResolveReply) XXX_Size() int { + return xxx_messageInfo_ResolveReply.Size(m) +} +func (m *ResolveReply) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveReply.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveReply proto.InternalMessageInfo + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { + proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") + proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") + proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") + proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") + proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") + proto.RegisterType((*BindReply)(nil), "appengine.BindReply") + proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") + proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") + proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") + proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") + proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") + proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") + proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") + proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") + proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") + proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") + proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") + proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") + proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") + proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") + proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") + proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") + proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") + proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") + proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") + proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") + proto.RegisterType((*SendReply)(nil), "appengine.SendReply") + proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") + proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") + proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") + proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") + proto.RegisterType((*PollReply)(nil), "appengine.PollReply") + proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") + proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) +} + +var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ + // 3088 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, + 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, + 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, + 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, + 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, + 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, + 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, + 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, + 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, + 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, + 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, + 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, + 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, + 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, + 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, + 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, + 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, + 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, + 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, + 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, + 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, + 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, + 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, + 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, + 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, + 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, + 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, + 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, + 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, + 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, + 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, + 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, + 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, + 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, + 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, + 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 
0x3d, + 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, + 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, + 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, + 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, + 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, + 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, + 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, + 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, + 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, + 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, + 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, + 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, + 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, + 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, + 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, + 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, + 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, + 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, + 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, + 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, + 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, + 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, + 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, + 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, + 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, + 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, + 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, + 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, + 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, + 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, + 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, + 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, + 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, + 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, + 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, + 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, + 0x38, 0x73, 0x7c, 
0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, + 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, + 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, + 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, + 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, + 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, + 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, + 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, + 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, + 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, + 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, + 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, + 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, + 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, + 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, + 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, + 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, + 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, + 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, + 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, + 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, + 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, + 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, + 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, + 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, + 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, + 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, + 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, + 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, + 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, + 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, + 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, + 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, + 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, + 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, + 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, + 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 
0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, + 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, + 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, + 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, + 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, + 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, + 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, + 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, + 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, + 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, + 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, + 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, + 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, + 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, + 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, + 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, + 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, + 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, + 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, + 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, + 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, + 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, + 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, + 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, + 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, + 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, + 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, + 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, + 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, + 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, + 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, + 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, + 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, + 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, + 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, + 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, + 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 
0xfd, 0x97, 0x81, 0x76, 0xd8, + 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, + 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, + 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, + 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, + 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, + 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, + 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, + 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, + 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, + 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, + 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, + 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, + 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, + 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, + 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, + 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, + 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, + 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, + 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, + 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, + 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, + 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, + 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, + 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, + 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, + 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, + 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, + 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, + 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, + 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, + 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, + 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, + 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, + 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, + 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, + 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 
0xb8, + 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, + 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, + 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, + 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, + 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, + 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, + 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, + 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, + 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, + 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, + 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, + 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 000000000..9006ae653 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,115 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { + if transactionFromContext(c) != nil { + return nil, errors.New("nested transactions are not supported") + } + + // Begin the transaction. 
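+	// The fields set below mirror the knobs on the datastore_v3
+	// BeginTransaction RPC: AllowMultipleEg opts in to cross-group (XG)
+	// transactions, Mode distinguishes read-only from read-write work, and
+	// PreviousTransaction identifies an earlier attempt when this call is
+	// a retry.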
+ t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if previousTransaction != nil { + req.PreviousTransaction = previousTransaction + } + if readOnly { + req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() + } else { + req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return nil, err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return &t.transaction, err + } + t.finished = true + + // Commit the transaction. + res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { + return &t.transaction, ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return &t.transaction, ErrConcurrentTransaction + } + } + return &t.transaction, err +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 000000000..5f727750a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
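+// (This works because the proto package exports ProtoPackageIsVersion2 as
+// a constant; referencing it below turns a version mismatch into a compile
+// error instead of a runtime failure.)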
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} +func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0} +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x 
*URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} +func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} +} + +type URLFetchServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} +func (*URLFetchServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0} +} +func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b) +} +func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic) +} +func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchServiceError.Merge(dst, src) +} +func (m *URLFetchServiceError) XXX_Size() int { + return xxx_messageInfo_URLFetchServiceError.Size(m) +} +func (m *URLFetchServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} +func (*URLFetchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1} +} +func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b) +} +func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic) +} +func (dst *URLFetchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchRequest.Merge(dst, src) +} +func (m *URLFetchRequest) XXX_Size() int { + return xxx_messageInfo_URLFetchRequest.Size(m) +} +func (m *URLFetchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo + +const 
Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} +func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} +} +func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b) +} +func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic) +} +func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src) +} +func (m *URLFetchRequest_Header) XXX_Size() int { + return xxx_messageInfo_URLFetchRequest_Header.Size(m) +} +func (m *URLFetchRequest_Header) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" 
json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} +func (*URLFetchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2} +} +func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b) +} +func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic) +} +func (dst *URLFetchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchResponse.Merge(dst, src) +} +func (m *URLFetchResponse) XXX_Size() int { + return xxx_messageInfo_URLFetchResponse.Size(m) +} +func (m *URLFetchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} +func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0} +} +func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b) +} +func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic) +} +func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src) +} +func (m *URLFetchResponse_Header) XXX_Size() int { + return xxx_messageInfo_URLFetchResponse_Header.Size(m) +} +func (m *URLFetchResponse_Header) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { + proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError") + proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest") + proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header") + proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse") + proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced) +} + +var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{ + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54, + 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29, + 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e, + 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d, + 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b, + 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27, + 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92, + 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7, + 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17, + 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec, + 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c, + 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a, + 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01, + 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14, + 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 
0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f, + 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07, + 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87, + 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a, + 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a, + 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37, + 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc, + 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde, + 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71, + 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17, + 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea, + 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4, + 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6, + 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96, + 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d, + 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d, + 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb, + 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad, + 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86, + 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20, + 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e, + 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f, + 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21, + 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c, + 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b, + 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6, + 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02, + 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b, + 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9, + 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e, + 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97, + 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3, + 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8, + 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 000000000..21860ca08 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 
2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 000000000..3de46df82 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 000000000..0ad50e2d3 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. 
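+	// When a timeout is supplied, only dialCtx is bounded by it: the
+	// deadline covers the Resolve and CreateSocket RPCs issued while
+	// dialing, while the ctx stored in the returned *Conn stays untouched
+	// for later reads and writes.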
+ if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
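+//
+// Illustrative usage, assuming a handler with an incoming *http.Request r:
+//
+//	conn.SetContext(appengine.NewContext(r))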
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 000000000..c804169a1 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 000000000..05642a992 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 000000000..6ffe1e6d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. 
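+//
+// Typical usage is via Client (illustrative, assuming an incoming
+// *http.Request r):
+//
+//	ctx := appengine.NewContext(r)
+//	client := urlfetch.Client(ctx)
+//	resp, err := client.Get("http://www.example.com/")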
+package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. 
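+// Internally, RoundTrip translates req into a URLFetchRequest proto,
+// issues it via the "urlfetch" service's Fetch call, and rebuilds an
+// *http.Response from the reply; redirects are left to http.Client, and a
+// body truncated by the proxy surfaces as ErrTruncatedBody on the final
+// Read of the response Body.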
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. + switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go new file mode 100644 index 000000000..7e9e63c42 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -0,0 +1,54 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/annotations.proto + +package annotations + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +var E_Http = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: (*HttpRule)(nil), + Field: 72295728, + Name: "google.api.http", + Tag: "bytes,72295728,opt,name=http", + Filename: "google/api/annotations.proto", +} + +func init() { + proto.RegisterExtension(E_Http) +} + +func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab) } + +var fileDescriptor_c591c5aa9fb79aab = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, + 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, + 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, + 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, + 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, + 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, + 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, + 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, + 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, + 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, + 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go new file mode 100644 index 000000000..abe688ec7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -0,0 +1,693 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/http.proto + +package annotations + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Defines the HTTP configuration for an API service. 
It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+type Http struct {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
+ // When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Http) Reset() { *m = Http{} }
+func (m *Http) String() string { return proto.CompactTextString(m) }
+func (*Http) ProtoMessage() {}
+func (*Http) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ff9994be407cdcc9, []int{0}
+}
+
+func (m *Http) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Http.Unmarshal(m, b)
+}
+func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Http.Marshal(b, m, deterministic)
+}
+func (m *Http) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Http.Merge(m, src)
+}
+func (m *Http) XXX_Size() int {
+ return xxx_messageInfo_Http.Size(m)
+}
+func (m *Http) XXX_DiscardUnknown() {
+ xxx_messageInfo_Http.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Http proto.InternalMessageInfo
+
+func (m *Http) GetRules() []*HttpRule {
+ if m != nil {
+ return m.Rules
+ }
+ return nil
+}
+
+func (m *Http) GetFullyDecodeReservedExpansion() bool {
+ if m != nil {
+ return m.FullyDecodeReservedExpansion
+ }
+ return false
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as a
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// SubMessage sub = 2; // `sub.subfield` is url-mapped
+// }
+// message Message {
+// string text = 1; // content of the resource
+// }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+// http:
+// rules:
+// - selector: .Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC.
Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// int64 revision = 2; // becomes a parameter
+// SubMessage sub = 3; // `sub.subfield` becomes a parameter
+// }
+//
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// the protobuf JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option.
Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+// omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+// request) can be classified into three types:
+// (a) Matched in the URL template.
+// (b) Covered by body (if body is `*`, everything except (a) fields;
+// else everything under the body field)
+// (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+type HttpRule struct {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
+ // Determines the URL pattern that is matched by this rule.
This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + // + // Types that are valid to be assigned to Pattern: + // *HttpRule_Get + // *HttpRule_Put + // *HttpRule_Post + // *HttpRule_Delete + // *HttpRule_Patch + // *HttpRule_Custom + Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"` + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. + Body string `protobuf:"bytes,7,opt,name=body,proto3" json:"body,omitempty"` + // Optional. The name of the response field whose value is mapped to the HTTP + // body of response. Other response fields are ignored. When + // not set, the response message will be used as HTTP body of response. + ResponseBody string `protobuf:"bytes,12,opt,name=response_body,json=responseBody,proto3" json:"response_body,omitempty"` + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpRule) Reset() { *m = HttpRule{} } +func (m *HttpRule) String() string { return proto.CompactTextString(m) } +func (*HttpRule) ProtoMessage() {} +func (*HttpRule) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{1} +} + +func (m *HttpRule) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpRule.Unmarshal(m, b) +} +func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) +} +func (m *HttpRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpRule.Merge(m, src) +} +func (m *HttpRule) XXX_Size() int { + return xxx_messageInfo_HttpRule.Size(m) +} +func (m *HttpRule) XXX_DiscardUnknown() { + xxx_messageInfo_HttpRule.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpRule proto.InternalMessageInfo + +func (m *HttpRule) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +type isHttpRule_Pattern interface { + isHttpRule_Pattern() +} + +type HttpRule_Get struct { + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + +func (m *HttpRule) GetPattern() isHttpRule_Pattern { + if m != nil { + return m.Pattern + } + return nil +} + 
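+// Editor's sketch (not generated code): the pattern oneof is read with a
+// type switch over the wrapper types; describeRule is a hypothetical helper.
+//
+//	func describeRule(r *HttpRule) string {
+//		switch p := r.GetPattern().(type) {
+//		case *HttpRule_Get:
+//			return "GET " + p.Get
+//		case *HttpRule_Post:
+//			return "POST " + p.Post
+//		case *HttpRule_Custom:
+//			return p.Custom.GetKind() + " " + p.Custom.GetPath()
+//		default:
+//			return "unset or other verb"
+//		}
+//	}
+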
+func (m *HttpRule) GetGet() string { + if x, ok := m.GetPattern().(*HttpRule_Get); ok { + return x.Get + } + return "" +} + +func (m *HttpRule) GetPut() string { + if x, ok := m.GetPattern().(*HttpRule_Put); ok { + return x.Put + } + return "" +} + +func (m *HttpRule) GetPost() string { + if x, ok := m.GetPattern().(*HttpRule_Post); ok { + return x.Post + } + return "" +} + +func (m *HttpRule) GetDelete() string { + if x, ok := m.GetPattern().(*HttpRule_Delete); ok { + return x.Delete + } + return "" +} + +func (m *HttpRule) GetPatch() string { + if x, ok := m.GetPattern().(*HttpRule_Patch); ok { + return x.Patch + } + return "" +} + +func (m *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := m.GetPattern().(*HttpRule_Custom); ok { + return x.Custom + } + return nil +} + +func (m *HttpRule) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func (m *HttpRule) GetResponseBody() string { + if m != nil { + return m.ResponseBody + } + return "" +} + +func (m *HttpRule) GetAdditionalBindings() []*HttpRule { + if m != nil { + return m.AdditionalBindings + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } +} + +func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Get) + case *HttpRule_Put: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Put) + case *HttpRule_Post: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Post) + case *HttpRule_Delete: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Delete) + case *HttpRule_Patch: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Patch) + case *HttpRule_Custom: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x) + } + return nil +} + +func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HttpRule) + switch tag { + case 2: // pattern.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Get{x} + return true, err + case 3: // pattern.put + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Put{x} + return true, err + case 4: // pattern.post + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Post{x} + return true, err + case 5: // pattern.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Pattern = &HttpRule_Delete{x} + return true, err + case 6: // pattern.patch + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + 
m.Pattern = &HttpRule_Patch{x} + return true, err + case 8: // pattern.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomHttpPattern) + err := b.DecodeMessage(msg) + m.Pattern = &HttpRule_Custom{msg} + return true, err + default: + return false, nil + } +} + +func _HttpRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HttpRule) + // pattern + switch x := m.Pattern.(type) { + case *HttpRule_Get: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Get))) + n += len(x.Get) + case *HttpRule_Put: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Put))) + n += len(x.Put) + case *HttpRule_Post: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Post))) + n += len(x.Post) + case *HttpRule_Delete: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Delete))) + n += len(x.Delete) + case *HttpRule_Patch: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Patch))) + n += len(x.Patch) + case *HttpRule_Custom: + s := proto.Size(x.Custom) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A custom pattern is used for defining custom HTTP verb. +type CustomHttpPattern struct { + // The name of this custom HTTP verb. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The path matched by this custom verb. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } +func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } +func (*CustomHttpPattern) ProtoMessage() {} +func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9994be407cdcc9, []int{2} +} + +func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) +} +func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) +} +func (m *CustomHttpPattern) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomHttpPattern.Merge(m, src) +} +func (m *CustomHttpPattern) XXX_Size() int { + return xxx_messageInfo_CustomHttpPattern.Size(m) +} +func (m *CustomHttpPattern) XXX_DiscardUnknown() { + xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo + +func (m *CustomHttpPattern) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *CustomHttpPattern) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Http)(nil), "google.api.Http") + proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") + proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") +} + +func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9) } + +var fileDescriptor_ff9994be407cdcc9 = []byte{ + // 419 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, + 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 
0x5c, 0x22, 0x37, + 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, + 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, + 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, + 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, + 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, + 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, + 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, + 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, + 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, + 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, + 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, + 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55, + 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, + 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, + 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, + 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, + 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, + 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, + 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, + 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, + 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, + 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, + 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, + 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go new file mode 100644 index 000000000..ab510a8ff --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -0,0 +1,632 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/distribution.proto + +package distribution + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Distribution contains summary statistics for a population of values and, +// optionally, a histogram representing the distribution of those values across +// a specified set of histogram buckets. +// +// The summary statistics are the count, mean, sum of the squared deviation from +// the mean, the minimum, and the maximum of the set of population of values. +// +// The histogram is based on a sequence of buckets and gives a count of values +// that fall into each bucket. The boundaries of the buckets are given either +// explicitly or by specifying parameters for a method of computing them +// (buckets of fixed width or buckets of exponentially increasing width). +// +// Although it is not forbidden, it is generally a bad idea to include +// non-finite values (infinities or NaNs) in the population of values, as this +// will render the `mean` and `sum_of_squared_deviation` fields meaningless. +type Distribution struct { + // The number of values in the population. Must be non-negative. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The arithmetic mean of the values in the population. If `count` is zero + // then this field must be zero. + Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If `count` is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // If specified, contains the range of the population values. The field + // must not be present if the `count` is zero. + Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` + // Defines the histogram bucket boundaries. + BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If `bucket_options` is given, then the sum of the values in `bucket_counts` + // must equal the value in `count`. If `bucket_options` is not given, no + // `bucket_counts` fields may be given. + // + // Bucket counts are given in order under the numbering scheme described + // above (the underflow bucket has number 0; the finite buckets, if any, + // have numbers 1 through N-2; the overflow bucket has number N-1). + // + // The size of `bucket_counts` must be no greater than N as defined in + // `bucket_options`. + // + // Any suffix of trailing zero bucket_count fields may be omitted. 
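+ // For example (editor's illustration), the counts {1, 5, 0, 0} may be
+ // transmitted as {1, 5}.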
+ BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution) Reset() { *m = Distribution{} } +func (m *Distribution) String() string { return proto.CompactTextString(m) } +func (*Distribution) ProtoMessage() {} +func (*Distribution) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0} +} + +func (m *Distribution) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution.Unmarshal(m, b) +} +func (m *Distribution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution.Marshal(b, m, deterministic) +} +func (m *Distribution) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution.Merge(m, src) +} +func (m *Distribution) XXX_Size() int { + return xxx_messageInfo_Distribution.Size(m) +} +func (m *Distribution) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution proto.InternalMessageInfo + +func (m *Distribution) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Distribution) GetMean() float64 { + if m != nil { + return m.Mean + } + return 0 +} + +func (m *Distribution) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *Distribution) GetRange() *Distribution_Range { + if m != nil { + return m.Range + } + return nil +} + +func (m *Distribution) GetBucketOptions() *Distribution_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *Distribution) GetBucketCounts() []int64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +// The range of the population values. +type Distribution_Range struct { + // The minimum of the population values. + Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"` + // The maximum of the population values. 
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_Range) Reset() { *m = Distribution_Range{} } +func (m *Distribution_Range) String() string { return proto.CompactTextString(m) } +func (*Distribution_Range) ProtoMessage() {} +func (*Distribution_Range) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 0} +} + +func (m *Distribution_Range) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_Range.Unmarshal(m, b) +} +func (m *Distribution_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_Range.Marshal(b, m, deterministic) +} +func (m *Distribution_Range) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_Range.Merge(m, src) +} +func (m *Distribution_Range) XXX_Size() int { + return xxx_messageInfo_Distribution_Range.Size(m) +} +func (m *Distribution_Range) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_Range.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_Range proto.InternalMessageInfo + +func (m *Distribution_Range) GetMin() float64 { + if m != nil { + return m.Min + } + return 0 +} + +func (m *Distribution_Range) GetMax() float64 { + if m != nil { + return m.Max + } + return 0 +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The histogram is given in `bucket_counts` as counts of values +// that fall into one of a sequence of non-overlapping buckets. The sequence +// of buckets is described by `bucket_options`. +// +// A bucket specifies an inclusive lower bound and exclusive upper bound for +// the values that are counted for that bucket. The upper bound of a bucket +// is strictly greater than the lower bound. +// +// The sequence of N buckets for a Distribution consists of an underflow +// bucket (number 0), zero or more finite buckets (number 1 through N - 2) and +// an overflow bucket (number N - 1). The buckets are contiguous: the lower +// bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1. +// The buckets span the whole range of finite values: lower bound of the +// underflow bucket is -infinity and the upper bound of the overflow bucket is +// +infinity. The finite buckets are so-called because both bounds are +// finite. +// +// `BucketOptions` describes bucket boundaries in one of three ways. Two +// describe the boundaries by giving parameters for a formula to generate +// boundaries and one gives the bucket boundaries explicitly. +// +// If `bucket_boundaries` is not given, then no `bucket_counts` may be given. +type Distribution_BucketOptions struct { + // Exactly one of these three fields must be set. 
+ // + // Types that are valid to be assigned to Options: + // *Distribution_BucketOptions_LinearBuckets + // *Distribution_BucketOptions_ExponentialBuckets + // *Distribution_BucketOptions_ExplicitBuckets + Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions) Reset() { *m = Distribution_BucketOptions{} } +func (m *Distribution_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions) ProtoMessage() {} +func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1} +} + +func (m *Distribution_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions.Merge(m, src) +} +func (m *Distribution_BucketOptions) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions.Size(m) +} +func (m *Distribution_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions proto.InternalMessageInfo + +type isDistribution_BucketOptions_Options interface { + isDistribution_BucketOptions_Options() +} + +type Distribution_BucketOptions_LinearBuckets struct { + LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExponentialBuckets struct { + ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"` +} + +type Distribution_BucketOptions_ExplicitBuckets struct { + ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"` +} + +func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {} + +func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {} + +func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok { + return x.LinearBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok { + return x.ExponentialBuckets + } + return nil +} + +func (m *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit { + if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok { + return x.ExplicitBuckets + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
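+// It returns the marshaler, unmarshaler, and sizer for the Options oneof,
+// together with the wrapper types that may populate it.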
+func (*Distribution_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Distribution_BucketOptions_OneofMarshaler, _Distribution_BucketOptions_OneofUnmarshaler, _Distribution_BucketOptions_OneofSizer, []interface{}{ + (*Distribution_BucketOptions_LinearBuckets)(nil), + (*Distribution_BucketOptions_ExponentialBuckets)(nil), + (*Distribution_BucketOptions_ExplicitBuckets)(nil), + } +} + +func _Distribution_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LinearBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExponentialBuckets: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExponentialBuckets); err != nil { + return err + } + case *Distribution_BucketOptions_ExplicitBuckets: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExplicitBuckets); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Distribution_BucketOptions.Options has unexpected type %T", x) + } + return nil +} + +func _Distribution_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Distribution_BucketOptions) + switch tag { + case 1: // options.linear_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Linear) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_LinearBuckets{msg} + return true, err + case 2: // options.exponential_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Exponential) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExponentialBuckets{msg} + return true, err + case 3: // options.explicit_buckets + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Distribution_BucketOptions_Explicit) + err := b.DecodeMessage(msg) + m.Options = &Distribution_BucketOptions_ExplicitBuckets{msg} + return true, err + default: + return false, nil + } +} + +func _Distribution_BucketOptions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Distribution_BucketOptions) + // options + switch x := m.Options.(type) { + case *Distribution_BucketOptions_LinearBuckets: + s := proto.Size(x.LinearBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExponentialBuckets: + s := proto.Size(x.ExponentialBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Distribution_BucketOptions_ExplicitBuckets: + s := proto.Size(x.ExplicitBuckets) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specify a sequence of buckets that all have the same width (except +// overflow and underflow). Each bucket represents a constant absolute +// uncertainty on the specific value in the bucket. +// +// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for +// bucket `i`: +// +// Upper bound (0 <= i < N-1): offset + (width * i). 
+// Lower bound (1 <= i < N): offset + (width * (i - 1)). +type Distribution_BucketOptions_Linear struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 0. + Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"` + // Lower bound of the first bucket. + Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Linear) Reset() { *m = Distribution_BucketOptions_Linear{} } +func (m *Distribution_BucketOptions_Linear) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Linear) ProtoMessage() {} +func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 0} +} + +func (m *Distribution_BucketOptions_Linear) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Linear) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions_Linear) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Linear.Merge(m, src) +} +func (m *Distribution_BucketOptions_Linear) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions_Linear.Size(m) +} +func (m *Distribution_BucketOptions_Linear) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Linear.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Linear proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 { + if m != nil { + return m.NumFiniteBuckets + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetWidth() float64 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *Distribution_BucketOptions_Linear) GetOffset() float64 { + if m != nil { + return m.Offset + } + return 0 +} + +// Specify a sequence of buckets that have a width that is proportional to +// the value of the lower bound. Each bucket represents a constant relative +// uncertainty on a specific value in the bucket. +// +// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for +// bucket i: +// +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +type Distribution_BucketOptions_Exponential struct { + // Must be greater than 0. + NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"` + // Must be greater than 1. + GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"` + // Must be greater than 0. 
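+ // For example (editor's illustration), scale = 1, growth_factor = 2 and
+ // num_finite_buckets = 3 yield the finite-bucket boundaries 1, 2, 4, 8.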
+	Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Exponential) Reset() {
+	*m = Distribution_BucketOptions_Exponential{}
+}
+func (m *Distribution_BucketOptions_Exponential) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Exponential) ProtoMessage() {}
+func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 1}
+}
+
+func (m *Distribution_BucketOptions_Exponential) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Distribution_BucketOptions_Exponential.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Distribution_BucketOptions_Exponential.Marshal(b, m, deterministic)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Distribution_BucketOptions_Exponential.Merge(m, src)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Size() int {
+	return xxx_messageInfo_Distribution_BucketOptions_Exponential.Size(m)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_DiscardUnknown() {
+	xxx_messageInfo_Distribution_BucketOptions_Exponential.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Exponential proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 {
+	if m != nil {
+		return m.NumFiniteBuckets
+	}
+	return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 {
+	if m != nil {
+		return m.GrowthFactor
+	}
+	return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetScale() float64 {
+	if m != nil {
+		return m.Scale
+	}
+	return 0
+}
+
+// A set of buckets with arbitrary widths.
+//
+// Defines `size(bounds) + 1` (= N) buckets with these boundaries for
+// bucket i:
+//
+// Upper bound (0 <= i < N-1): bounds[i]
+// Lower bound (1 <= i < N): bounds[i - 1]
+//
+// There must be at least one element in `bounds`. If `bounds` has only one
+// element, there are no finite buckets, and that single element is the
+// common boundary of the overflow and underflow buckets.
+type Distribution_BucketOptions_Explicit struct {
+	// The values must be monotonically increasing.
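+	// For example (illustrative): bounds = [0, 10, 20] defines the buckets
+	// (-inf, 0), [0, 10), [10, 20), and [20, +inf).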
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Distribution_BucketOptions_Explicit) Reset() { *m = Distribution_BucketOptions_Explicit{} } +func (m *Distribution_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*Distribution_BucketOptions_Explicit) ProtoMessage() {} +func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 2} +} + +func (m *Distribution_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_Distribution_BucketOptions_Explicit.Merge(m, src) +} +func (m *Distribution_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_Distribution_BucketOptions_Explicit.Size(m) +} +func (m *Distribution_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_Distribution_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_Distribution_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *Distribution_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +func init() { + proto.RegisterType((*Distribution)(nil), "google.api.Distribution") + proto.RegisterType((*Distribution_Range)(nil), "google.api.Distribution.Range") + proto.RegisterType((*Distribution_BucketOptions)(nil), "google.api.Distribution.BucketOptions") + proto.RegisterType((*Distribution_BucketOptions_Linear)(nil), "google.api.Distribution.BucketOptions.Linear") + proto.RegisterType((*Distribution_BucketOptions_Exponential)(nil), "google.api.Distribution.BucketOptions.Exponential") + proto.RegisterType((*Distribution_BucketOptions_Explicit)(nil), "google.api.Distribution.BucketOptions.Explicit") +} + +func init() { proto.RegisterFile("google/api/distribution.proto", fileDescriptor_0835ee0fd90bf943) } + +var fileDescriptor_0835ee0fd90bf943 = []byte{ + // 522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6b, 0xd4, 0x40, + 0x14, 0xdd, 0x34, 0xfb, 0xa1, 0x77, 0x3f, 0x5c, 0xc7, 0x2a, 0x21, 0xa8, 0x2c, 0x2d, 0xc8, 0x82, + 0x9a, 0x85, 0x55, 0xf0, 0xc1, 0xb7, 0x6d, 0x2d, 0xfb, 0xa0, 0xb4, 0x8c, 0xe0, 0x83, 0x08, 0x61, + 0x36, 0x99, 0xa4, 0xa3, 0xc9, 0x4c, 0xcc, 0x4c, 0xda, 0xfd, 0x01, 0xfe, 0x29, 0xff, 0x9d, 0xe4, + 0x4e, 0xb6, 0x4d, 0x11, 0x61, 0x7d, 0x9b, 0x73, 0xef, 0x99, 0x73, 0xce, 0xbd, 0x64, 0x02, 0xcf, + 0x52, 0xa5, 0xd2, 0x8c, 0x2f, 0x58, 0x21, 0x16, 0xb1, 0xd0, 0xa6, 0x14, 0x9b, 0xca, 0x08, 0x25, + 0x83, 0xa2, 0x54, 0x46, 0x11, 0xb0, 0xed, 0x80, 0x15, 0xc2, 0x7f, 0xda, 0xa2, 0x32, 0x29, 0x95, + 0x61, 0x35, 0x51, 0x5b, 0xe6, 0xd1, 0xaf, 0x01, 0x8c, 0x4e, 0x5b, 0x02, 0xe4, 0x10, 0x7a, 0x91, + 0xaa, 0xa4, 0xf1, 0x9c, 0x99, 0x33, 0x77, 0xa9, 0x05, 0x84, 0x40, 0x37, 0xe7, 0x4c, 0x7a, 0x07, + 0x33, 0x67, 0xee, 0x50, 0x3c, 0x93, 0x77, 0xe0, 0xe9, 0x2a, 0x0f, 0x55, 0x12, 0xea, 0x9f, 0x15, + 0x2b, 0x79, 0x1c, 0xc6, 0xfc, 0x4a, 0xa0, 0xba, 0xe7, 0x22, 0xef, 0xb1, 0xae, 0xf2, 0xf3, 0xe4, + 0xb3, 0xed, 0x9e, 0xee, 0x9a, 
0xe4, 0x2d, 0xf4, 0x4a, 0x26, 0x53, 0xee, 0x75, 0x67, 0xce, 0x7c, + 0xb8, 0x7c, 0x1e, 0xdc, 0xa6, 0x0d, 0xda, 0x59, 0x02, 0x5a, 0xb3, 0xa8, 0x25, 0x93, 0x4f, 0x30, + 0xd9, 0x54, 0xd1, 0x0f, 0x6e, 0x42, 0x55, 0xe0, 0x04, 0x5e, 0x1f, 0xaf, 0xbf, 0xf8, 0xe7, 0xf5, + 0x15, 0xd2, 0xcf, 0x2d, 0x9b, 0x8e, 0x37, 0x6d, 0x48, 0x8e, 0xa1, 0x29, 0x84, 0x38, 0xa1, 0xf6, + 0x06, 0x33, 0x77, 0xee, 0xd2, 0x91, 0x2d, 0x9e, 0x60, 0xcd, 0x7f, 0x09, 0x3d, 0xcc, 0x40, 0xa6, + 0xe0, 0xe6, 0x42, 0xe2, 0x4e, 0x1c, 0x5a, 0x1f, 0xb1, 0xc2, 0xb6, 0xcd, 0x42, 0xea, 0xa3, 0xff, + 0xbb, 0x0b, 0xe3, 0x3b, 0x96, 0xe4, 0x0b, 0x4c, 0x32, 0x21, 0x39, 0x2b, 0x43, 0xab, 0xaa, 0x51, + 0x60, 0xb8, 0x7c, 0xbd, 0x5f, 0xe4, 0xe0, 0x23, 0x5e, 0x5e, 0x77, 0xe8, 0xd8, 0xca, 0xd8, 0xae, + 0x26, 0x1c, 0x1e, 0xf1, 0x6d, 0xa1, 0x24, 0x97, 0x46, 0xb0, 0xec, 0x46, 0xfc, 0x00, 0xc5, 0x97, + 0x7b, 0x8a, 0x7f, 0xb8, 0x55, 0x58, 0x77, 0x28, 0x69, 0x09, 0xee, 0x6c, 0xbe, 0xc1, 0x94, 0x6f, + 0x8b, 0x4c, 0x44, 0xc2, 0xdc, 0x78, 0xb8, 0xe8, 0xb1, 0xd8, 0xdf, 0x03, 0xaf, 0xaf, 0x3b, 0xf4, + 0xc1, 0x4e, 0xaa, 0x51, 0xf7, 0x63, 0xe8, 0xdb, 0xf9, 0xc8, 0x2b, 0x20, 0xb2, 0xca, 0xc3, 0x44, + 0x48, 0x61, 0xf8, 0x9d, 0x55, 0xf5, 0xe8, 0x54, 0x56, 0xf9, 0x19, 0x36, 0x76, 0xa9, 0x0e, 0xa1, + 0x77, 0x2d, 0x62, 0x73, 0xd9, 0xac, 0xde, 0x02, 0xf2, 0x04, 0xfa, 0x2a, 0x49, 0x34, 0x37, 0xcd, + 0xa7, 0xd7, 0x20, 0xff, 0x0a, 0x86, 0xad, 0x41, 0xff, 0xd3, 0xea, 0x18, 0xc6, 0x69, 0xa9, 0xae, + 0xcd, 0x65, 0x98, 0xb0, 0xc8, 0xa8, 0xb2, 0xb1, 0x1c, 0xd9, 0xe2, 0x19, 0xd6, 0xea, 0x3c, 0x3a, + 0x62, 0x19, 0x6f, 0x8c, 0x2d, 0xf0, 0x8f, 0xe0, 0xde, 0x6e, 0xf8, 0x3a, 0xdb, 0x46, 0x55, 0x32, + 0xae, 0x8d, 0xdc, 0x3a, 0x9b, 0x45, 0xab, 0xfb, 0x30, 0x68, 0x3e, 0xe5, 0xd5, 0x77, 0x98, 0x44, + 0x2a, 0x6f, 0x6d, 0x75, 0xf5, 0xb0, 0xbd, 0xd6, 0x8b, 0xfa, 0xad, 0x5e, 0x38, 0x5f, 0x4f, 0x1a, + 0x42, 0xaa, 0x32, 0x26, 0xd3, 0x40, 0x95, 0xe9, 0x22, 0xe5, 0x12, 0x5f, 0xf2, 0xc2, 0xb6, 0x58, + 0x21, 0xf4, 0x5f, 0x7f, 0x85, 0xf7, 0x6d, 0xb0, 0xe9, 0x23, 0xff, 0xcd, 0x9f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x62, 0xb4, 0xef, 0x6b, 0x44, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go new file mode 100644 index 000000000..82a31400d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/label.proto + +package label + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Value types that can be used as label values. +type LabelDescriptor_ValueType int32 + +const ( + // A variable-length string. This is the default. + LabelDescriptor_STRING LabelDescriptor_ValueType = 0 + // Boolean; true or false. + LabelDescriptor_BOOL LabelDescriptor_ValueType = 1 + // A 64-bit signed integer. 
+ LabelDescriptor_INT64 LabelDescriptor_ValueType = 2 +) + +var LabelDescriptor_ValueType_name = map[int32]string{ + 0: "STRING", + 1: "BOOL", + 2: "INT64", +} + +var LabelDescriptor_ValueType_value = map[string]int32{ + "STRING": 0, + "BOOL": 1, + "INT64": 2, +} + +func (x LabelDescriptor_ValueType) String() string { + return proto.EnumName(LabelDescriptor_ValueType_name, int32(x)) +} + +func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f372a463e25ba151, []int{0, 0} +} + +// A description of a label. +type LabelDescriptor struct { + // The label key. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The type of data that can be assigned to the label. + ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"` + // A human-readable description for the label. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} } +func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) } +func (*LabelDescriptor) ProtoMessage() {} +func (*LabelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_f372a463e25ba151, []int{0} +} + +func (m *LabelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelDescriptor.Unmarshal(m, b) +} +func (m *LabelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelDescriptor.Marshal(b, m, deterministic) +} +func (m *LabelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelDescriptor.Merge(m, src) +} +func (m *LabelDescriptor) XXX_Size() int { + return xxx_messageInfo_LabelDescriptor.Size(m) +} +func (m *LabelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_LabelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelDescriptor proto.InternalMessageInfo + +func (m *LabelDescriptor) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return LabelDescriptor_STRING +} + +func (m *LabelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value) + proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor") +} + +func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor_f372a463e25ba151) } + +var fileDescriptor_f372a463e25ba151 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7, + 0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31, + 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c, + 0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c, + 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 
0xa9, 0x0e, 0xa9, 0x2c, 0x48, + 0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24, + 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05, + 0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08, + 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1, + 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3, + 0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9, + 0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go new file mode 100644 index 000000000..800e512cc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -0,0 +1,397 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/metric.proto + +package metric + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + label "google.golang.org/genproto/googleapis/api/label" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The kind of measurement. It describes how the data is reported. +type MetricDescriptor_MetricKind int32 + +const ( + // Do not use this default value. + MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0 + // An instantaneous measurement of a value. + MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1 + // The change in a value during a time interval. + MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2 + // A value accumulated over a time interval. Cumulative + // measurements in a time series should have the same start time + // and increasing end times, until an event resets the cumulative + // value to zero and sets a new start time for the following + // points. + MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3 +) + +var MetricDescriptor_MetricKind_name = map[int32]string{ + 0: "METRIC_KIND_UNSPECIFIED", + 1: "GAUGE", + 2: "DELTA", + 3: "CUMULATIVE", +} + +var MetricDescriptor_MetricKind_value = map[string]int32{ + "METRIC_KIND_UNSPECIFIED": 0, + "GAUGE": 1, + "DELTA": 2, + "CUMULATIVE": 3, +} + +func (x MetricDescriptor_MetricKind) String() string { + return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x)) +} + +func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0, 0} +} + +// The value type of a metric. +type MetricDescriptor_ValueType int32 + +const ( + // Do not use this default value. + MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0 + // The value is a boolean. + // This value type can be used only if the metric kind is `GAUGE`. 
+ MetricDescriptor_BOOL MetricDescriptor_ValueType = 1 + // The value is a signed 64-bit integer. + MetricDescriptor_INT64 MetricDescriptor_ValueType = 2 + // The value is a double precision floating point number. + MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3 + // The value is a text string. + // This value type can be used only if the metric kind is `GAUGE`. + MetricDescriptor_STRING MetricDescriptor_ValueType = 4 + // The value is a [`Distribution`][google.api.Distribution]. + MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5 + // The value is money. + MetricDescriptor_MONEY MetricDescriptor_ValueType = 6 +) + +var MetricDescriptor_ValueType_name = map[int32]string{ + 0: "VALUE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "DOUBLE", + 4: "STRING", + 5: "DISTRIBUTION", + 6: "MONEY", +} + +var MetricDescriptor_ValueType_value = map[string]int32{ + "VALUE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "DOUBLE": 3, + "STRING": 4, + "DISTRIBUTION": 5, + "MONEY": 6, +} + +func (x MetricDescriptor_ValueType) String() string { + return proto.EnumName(MetricDescriptor_ValueType_name, int32(x)) +} + +func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0, 1} +} + +// Defines a metric type and its schema. Once a metric descriptor is created, +// deleting or altering it stops data collection and makes the metric type's +// existing data unusable. +type MetricDescriptor struct { + // The resource name of the metric descriptor. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The metric type, including its DNS name prefix. The type is not + // URL-encoded. All user-defined custom metric types have the DNS name + // `custom.googleapis.com`. Metric types should use a natural hierarchical + // grouping. For example: + // + // "custom.googleapis.com/invoice/paid/amount" + // "appengine.googleapis.com/http/server/response_latencies" + Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` + // The set of labels that can be used to describe a specific + // instance of this metric type. For example, the + // `appengine.googleapis.com/http/server/response_latencies` metric + // type has a label for the HTTP response code, `response_code`, so + // you can look at latencies for successful responses or just + // for responses that failed. + Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + // Whether the metric records instantaneous values, changes to a value, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"` + // Whether the measurement is an integer, a floating-point number, etc. + // Some combinations of `metric_kind` and `value_type` might not be supported. + ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"` + // The unit in which the metric value is reported. It is only applicable + // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. 
The + // supported units are a subset of [The Unified Code for Units of + // Measure](http://unitsofmeasure.org/ucum.html) standard: + // + // **Basic units (UNIT)** + // + // * `bit` bit + // * `By` byte + // * `s` second + // * `min` minute + // * `h` hour + // * `d` day + // + // **Prefixes (PREFIX)** + // + // * `k` kilo (10**3) + // * `M` mega (10**6) + // * `G` giga (10**9) + // * `T` tera (10**12) + // * `P` peta (10**15) + // * `E` exa (10**18) + // * `Z` zetta (10**21) + // * `Y` yotta (10**24) + // * `m` milli (10**-3) + // * `u` micro (10**-6) + // * `n` nano (10**-9) + // * `p` pico (10**-12) + // * `f` femto (10**-15) + // * `a` atto (10**-18) + // * `z` zepto (10**-21) + // * `y` yocto (10**-24) + // * `Ki` kibi (2**10) + // * `Mi` mebi (2**20) + // * `Gi` gibi (2**30) + // * `Ti` tebi (2**40) + // + // **Grammar** + // + // The grammar also includes these connectors: + // + // * `/` division (as an infix operator, e.g. `1/s`). + // * `.` multiplication (as an infix operator, e.g. `GBy.d`) + // + // The grammar for a unit is as follows: + // + // Expression = Component { "." Component } { "/" Component } ; + // + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; + // + // Annotation = "{" NAME "}" ; + // + // Notes: + // + // * `Annotation` is just a comment if it follows a `UNIT` and is + // equivalent to `1` if it is used alone. For examples, + // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. + // * `NAME` is a sequence of non-blank printable ASCII characters not + // containing '{' or '}'. + // * `1` represents dimensionless value 1, such as in `1/s`. + // * `%` represents dimensionless value 1/100, and annotates values giving + // a percentage. + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + // A concise name for the metric, which can be displayed in user interfaces. + // Use sentence case without an ending period, for example "Request count". + // This field is optional but it is recommended to be set for any metrics + // associated with user-visible concepts, such as Quota. 
+ DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{0} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MetricDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +func (m *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// A specific metric, identified by specifying values for all of the +// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. +type Metric struct { + // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. + // For example, `custom.googleapis.com/invoice/paid/amount`. + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // The set of label values that uniquely identify this metric. All + // labels listed in the `MetricDescriptor` must be assigned values. 
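+	// An illustrative sketch (the metric type is taken from the examples
+	// above; the "currency" label key is hypothetical):
+	//
+	//	m := &Metric{
+	//		Type:   "custom.googleapis.com/invoice/paid/amount",
+	//		Labels: map[string]string{"currency": "USD"},
+	//	}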
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_927eaac1a24f8abb, []int{1} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Metric) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value) + proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value) + proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor") + proto.RegisterType((*Metric)(nil), "google.api.Metric") + proto.RegisterMapType((map[string]string)(nil), "google.api.Metric.LabelsEntry") +} + +func init() { proto.RegisterFile("google/api/metric.proto", fileDescriptor_927eaac1a24f8abb) } + +var fileDescriptor_927eaac1a24f8abb = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4d, 0x6f, 0xda, 0x40, + 0x10, 0xad, 0x3f, 0x70, 0xc3, 0x10, 0xa1, 0xd5, 0xaa, 0x4a, 0x2c, 0x22, 0x55, 0x94, 0x43, 0xcb, + 0x09, 0xa4, 0xa4, 0x4a, 0xbf, 0x4e, 0x80, 0xb7, 0xd4, 0x8a, 0xb1, 0x91, 0x63, 0x23, 0xa5, 0x17, + 0xcb, 0x81, 0x95, 0x65, 0xc5, 0xd8, 0xae, 0x71, 0x22, 0xf9, 0x57, 0xf4, 0x17, 0xf4, 0xd2, 0x5f, + 0x5a, 0xed, 0xae, 0x03, 0x16, 0x95, 0x72, 0xe2, 0xed, 0x9b, 0x37, 0x6f, 0x67, 0x96, 0x67, 0x38, + 0x8f, 0xb2, 0x2c, 0x4a, 0xe8, 0x38, 0xcc, 0xe3, 0xf1, 0x96, 0x96, 0x45, 0xbc, 0x1e, 0xe5, 0x45, + 0x56, 0x66, 0x18, 0x44, 0x61, 0x14, 0xe6, 0x71, 0xef, 0xac, 0x21, 0x4a, 0xc2, 0x7b, 0x9a, 0x08, + 0xcd, 0xe0, 0x8f, 0x0a, 0x68, 0xc1, 0x9b, 0x0c, 0xba, 0x5b, 0x17, 0x71, 0x5e, 0x66, 0x05, 0xc6, + 0xa0, 0xa6, 0xe1, 0x96, 0xea, 0x52, 0x5f, 0x1a, 0xb6, 0x5d, 0x8e, 0x19, 0x57, 0x56, 0x39, 0xd5, + 0x4f, 0x04, 0xc7, 0x30, 0xbe, 0x02, 0x8d, 0x7b, 0xed, 0x74, 0xb9, 0xaf, 0x0c, 0x3b, 0x97, 0x17, + 0xa3, 0xc3, 0x8d, 0x23, 0x8b, 0x55, 0x0e, 0xa6, 0x6e, 0x2d, 0xc5, 0x3f, 0xa0, 0x23, 0xa6, 0x0c, + 0x1e, 0xe2, 0x74, 0xa3, 0x2b, 0x7d, 0x69, 0xd8, 0xbd, 0xfc, 0xd0, 0xec, 0x3c, 0x9e, 0xa7, 0x26, + 0x6e, 0xe2, 0x74, 0xe3, 0xc2, 0x76, 0x8f, 0x31, 0x01, 0x78, 0x0a, 0x93, 0x47, 0x1a, 0xf0, 0xc1, + 0x54, 0x6e, 0xf4, 0xfe, 0x45, 0xa3, 0x15, 0x93, 0x7b, 0x55, 0x4e, 0xdd, 0xf6, 0xd3, 0x33, 0x64, + 0x9b, 0x3d, 0xa6, 0x71, 0xa9, 0xb7, 0xc4, 0x66, 0x0c, 0xe3, 0x3e, 0x74, 0x36, 0x75, 0x5b, 0x9c, + 0xa5, 0xba, 0xc6, 0x4b, 0x4d, 0x0a, 0xbf, 0x83, 0xd3, 0x4d, 0xbc, 0xcb, 0x93, 
0xb0, 0x0a, 0xf8, + 0x5b, 0xbd, 0xae, 0x25, 0x82, 0xb3, 0xc3, 0x2d, 0x1d, 0x38, 0x00, 0x87, 0xc9, 0xf1, 0x05, 0x9c, + 0x2f, 0x88, 0xe7, 0x9a, 0xb3, 0xe0, 0xc6, 0xb4, 0x8d, 0xc0, 0xb7, 0x6f, 0x97, 0x64, 0x66, 0x7e, + 0x37, 0x89, 0x81, 0x5e, 0xe1, 0x36, 0xb4, 0xe6, 0x13, 0x7f, 0x4e, 0x90, 0xc4, 0xa0, 0x41, 0x2c, + 0x6f, 0x82, 0x64, 0xdc, 0x05, 0x98, 0xf9, 0x0b, 0xdf, 0x9a, 0x78, 0xe6, 0x8a, 0x20, 0x65, 0xf0, + 0x0b, 0xda, 0xfb, 0x0d, 0x70, 0x0f, 0xce, 0x56, 0x13, 0xcb, 0x27, 0x81, 0x77, 0xb7, 0x24, 0x47, + 0x76, 0x27, 0xa0, 0x4e, 0x1d, 0xc7, 0x12, 0x6e, 0xa6, 0xed, 0x5d, 0x7f, 0x44, 0x32, 0x06, 0xd0, + 0x0c, 0xc7, 0x9f, 0x5a, 0x04, 0x29, 0x0c, 0xdf, 0x7a, 0xae, 0x69, 0xcf, 0x91, 0x8a, 0x11, 0x9c, + 0x1a, 0x26, 0x3b, 0x4d, 0x7d, 0xcf, 0x74, 0x6c, 0xd4, 0x62, 0x4d, 0x0b, 0xc7, 0x26, 0x77, 0x48, + 0x1b, 0xfc, 0x96, 0x40, 0x13, 0x4b, 0xec, 0x13, 0xa0, 0x34, 0x12, 0x70, 0x7d, 0x94, 0x80, 0xb7, + 0xff, 0x3f, 0xbf, 0x08, 0xc2, 0x8e, 0xa4, 0x65, 0x51, 0x3d, 0x87, 0xa0, 0xf7, 0x05, 0x3a, 0x0d, + 0x1a, 0x23, 0x50, 0x1e, 0x68, 0x55, 0xe7, 0x8d, 0x41, 0xfc, 0x06, 0x5a, 0xfc, 0x1f, 0xd2, 0x65, + 0xce, 0x89, 0xc3, 0x57, 0xf9, 0xb3, 0x34, 0x0d, 0xa0, 0xbb, 0xce, 0xb6, 0x8d, 0x7b, 0xa6, 0x1d, + 0x71, 0xd1, 0x92, 0x05, 0x7a, 0x29, 0xfd, 0xfc, 0x54, 0x97, 0xa2, 0x2c, 0x09, 0xd3, 0x68, 0x94, + 0x15, 0xd1, 0x38, 0xa2, 0x29, 0x8f, 0xfb, 0x58, 0x94, 0xc2, 0x3c, 0xde, 0x35, 0x3e, 0x97, 0x6f, + 0xe2, 0xe7, 0xaf, 0xac, 0xce, 0x27, 0x4b, 0xf3, 0x5e, 0xe3, 0xd2, 0xab, 0x7f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x18, 0x04, 0x05, 0x82, 0x58, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go new file mode 100644 index 000000000..aede16b14 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/api/monitored_resource.proto + +package monitoredres + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _struct "github.com/golang/protobuf/ptypes/struct" + label "google.golang.org/genproto/googleapis/api/label" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a +// type name and a set of labels. For example, the monitored resource +// descriptor for Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +type MonitoredResourceDescriptor struct { + // Optional. 
The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // The maximum length of this value is 256 characters. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} } +func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceDescriptor) ProtoMessage() {} +func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd8bd738b08f2bf, []int{0} +} + +func (m *MonitoredResourceDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceDescriptor.Unmarshal(m, b) +} +func (m *MonitoredResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceDescriptor.Marshal(b, m, deterministic) +} +func (m *MonitoredResourceDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceDescriptor.Merge(m, src) +} +func (m *MonitoredResourceDescriptor) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceDescriptor.Size(m) +} +func (m *MonitoredResourceDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResourceDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceDescriptor proto.InternalMessageInfo + +func (m *MonitoredResourceDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// An object 
representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+// { "type": "gce_instance",
+// "labels": { "instance_id": "12345678901234",
+// "zone": "us-central1-a" }}
+type MonitoredResource struct {
+	// Required. The monitored resource type. This field must match
+	// the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+	// example, the type of a Compute Engine VM instance is `gce_instance`.
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// Required. Values for all of the labels listed in the associated monitored
+	// resource descriptor. For example, Compute Engine VM instances use the
+	// labels `"project_id"`, `"instance_id"`, and `"zone"`.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
+func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResource) ProtoMessage() {}
+func (*MonitoredResource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6cd8bd738b08f2bf, []int{1}
+}
+
+func (m *MonitoredResource) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MonitoredResource.Unmarshal(m, b)
+}
+func (m *MonitoredResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MonitoredResource.Marshal(b, m, deterministic)
+}
+func (m *MonitoredResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MonitoredResource.Merge(m, src)
+}
+func (m *MonitoredResource) XXX_Size() int {
+	return xxx_messageInfo_MonitoredResource.Size(m)
+}
+func (m *MonitoredResource) XXX_DiscardUnknown() {
+	xxx_messageInfo_MonitoredResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResource proto.InternalMessageInfo
+
+func (m *MonitoredResource) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *MonitoredResource) GetLabels() map[string]string {
+	if m != nil {
+		return m.Labels
+	}
+	return nil
+}
+
+// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object.
+// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to
+// uniquely identify a monitored resource instance. There is some other useful
+// auxiliary metadata. Google Stackdriver Monitoring & Logging uses an ingestion
+// pipeline to extract metadata for cloud resources of all types, and stores
+// the metadata in this message.
+type MonitoredResourceMetadata struct {
+	// Output only. Values for predefined system metadata labels.
+	// System labels are a kind of metadata extracted by Google Stackdriver.
+ // Stackdriver determines what system labels are useful and how to obtain + // their values. Some examples: "machine_image", "vpc", "subnet_id", + // "security_group", "name", etc. + // System label values can be only strings, Boolean values, or a list of + // strings. For example: + // + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } + SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` + // Output only. A map of user-defined metadata labels. + UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MonitoredResourceMetadata) Reset() { *m = MonitoredResourceMetadata{} } +func (m *MonitoredResourceMetadata) String() string { return proto.CompactTextString(m) } +func (*MonitoredResourceMetadata) ProtoMessage() {} +func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd8bd738b08f2bf, []int{2} +} + +func (m *MonitoredResourceMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MonitoredResourceMetadata.Unmarshal(m, b) +} +func (m *MonitoredResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MonitoredResourceMetadata.Marshal(b, m, deterministic) +} +func (m *MonitoredResourceMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MonitoredResourceMetadata.Merge(m, src) +} +func (m *MonitoredResourceMetadata) XXX_Size() int { + return xxx_messageInfo_MonitoredResourceMetadata.Size(m) +} +func (m *MonitoredResourceMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MonitoredResourceMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MonitoredResourceMetadata proto.InternalMessageInfo + +func (m *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct { + if m != nil { + return m.SystemLabels + } + return nil +} + +func (m *MonitoredResourceMetadata) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func init() { + proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor") + proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResource.LabelsEntry") + proto.RegisterType((*MonitoredResourceMetadata)(nil), "google.api.MonitoredResourceMetadata") + proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResourceMetadata.UserLabelsEntry") +} + +func init() { + proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor_6cd8bd738b08f2bf) +} + +var fileDescriptor_6cd8bd738b08f2bf = []byte{ + // 415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0xab, 0xd3, 0x40, + 0x14, 0x65, 0xd2, 0x0f, 0xf0, 0xa6, 0x7e, 0x0d, 0x52, 0x63, 0xea, 0xa2, 0xd6, 0x4d, 0xdd, 0x24, + 0xd0, 0x22, 0xf8, 0xb9, 0x68, 0x55, 0x44, 0xb0, 0x52, 0x22, 0xba, 0x70, 0x13, 0xa6, 0xc9, 0x18, + 0x82, 0x49, 0x26, 0xcc, 0x4c, 0x84, 0xfc, 0x1d, 0xc1, 0xdf, 0xe1, 0x5f, 0x72, 0xe9, 0x52, 0x32, + 0x33, 0x69, 0xd3, 0x97, 0xc7, 0x83, 0xb7, 0xbb, 0xf7, 0xdc, 0x73, 0xcf, 0x3d, 0x27, 0x43, 0xe0, + 0x71, 0xc2, 0x58, 0x92, 0x51, 0x9f, 0x94, 0xa9, 0x9f, 0xb3, 
0x22, 0x95, 0x8c, 0xd3, 0x38, 0xe4, + 0x54, 0xb0, 0x8a, 0x47, 0xd4, 0x2b, 0x39, 0x93, 0x0c, 0x83, 0x26, 0x79, 0xa4, 0x4c, 0xdd, 0x69, + 0x67, 0x21, 0x23, 0x07, 0x9a, 0x69, 0x8e, 0xfb, 0xd0, 0xe0, 0xaa, 0x3b, 0x54, 0xdf, 0x7d, 0x21, + 0x79, 0x15, 0x49, 0x3d, 0x5d, 0xfc, 0x41, 0x30, 0xdb, 0xb5, 0xf2, 0x81, 0x51, 0x7f, 0x4b, 0x45, + 0xc4, 0xd3, 0x52, 0x32, 0x8e, 0x31, 0x0c, 0x0b, 0x92, 0x53, 0x67, 0x34, 0x47, 0xcb, 0x1b, 0x81, + 0xaa, 0x1b, 0x4c, 0xd6, 0x25, 0x75, 0x90, 0xc6, 0x9a, 0x1a, 0x3f, 0x82, 0x49, 0x9c, 0x8a, 0x32, + 0x23, 0x75, 0xa8, 0xf8, 0x96, 0x9a, 0xd9, 0x06, 0xfb, 0xd4, 0xac, 0xcd, 0xc1, 0x8e, 0x8d, 0x70, + 0xca, 0x0a, 0x67, 0x60, 0x18, 0x27, 0x08, 0xaf, 0x61, 0xac, 0x9c, 0x0b, 0x67, 0x38, 0x1f, 0x2c, + 0xed, 0xd5, 0xcc, 0x3b, 0xe5, 0xf3, 0x3e, 0x36, 0x93, 0x93, 0xb3, 0xc0, 0x50, 0x17, 0xbf, 0x11, + 0xdc, 0xed, 0x25, 0xb8, 0xd4, 0xe3, 0xe6, 0x28, 0x6f, 0x29, 0xf9, 0x27, 0x5d, 0xf9, 0x9e, 0x84, + 0x3e, 0x28, 0xde, 0x15, 0x92, 0xd7, 0xed, 0x31, 0xf7, 0x39, 0xd8, 0x1d, 0x18, 0xdf, 0x81, 0xc1, + 0x0f, 0x5a, 0x9b, 0x23, 0x4d, 0x89, 0xef, 0xc1, 0xe8, 0x27, 0xc9, 0xaa, 0xf6, 0x03, 0xe8, 0xe6, + 0x85, 0xf5, 0x0c, 0x2d, 0xfe, 0x22, 0x78, 0xd0, 0x3b, 0xb2, 0xa3, 0x92, 0xc4, 0x44, 0x12, 0xfc, + 0x0a, 0x6e, 0x8a, 0x5a, 0x48, 0x9a, 0x87, 0xc6, 0x62, 0xa3, 0x69, 0xaf, 0xee, 0xb7, 0x16, 0xdb, + 0xd7, 0xf3, 0x3e, 0xab, 0xd7, 0x0b, 0x26, 0x9a, 0xad, 0xcd, 0xe0, 0xaf, 0x60, 0x57, 0x82, 0xf2, + 0xf0, 0x2c, 0xde, 0xd3, 0x2b, 0xe3, 0xb5, 0x97, 0xbd, 0x2f, 0x82, 0xf2, 0x6e, 0x54, 0xa8, 0x8e, + 0x80, 0xfb, 0x1a, 0x6e, 0x5f, 0x18, 0x5f, 0x27, 0xf2, 0xb6, 0x86, 0x5b, 0x11, 0xcb, 0x3b, 0x36, + 0xb6, 0xd3, 0x9e, 0x8f, 0x7d, 0x13, 0x6c, 0x8f, 0xbe, 0xbd, 0x31, 0xac, 0x84, 0x65, 0xa4, 0x48, + 0x3c, 0xc6, 0x13, 0x3f, 0xa1, 0x85, 0x8a, 0xed, 0xeb, 0x11, 0x29, 0x53, 0x71, 0xfe, 0x3b, 0x70, + 0x2a, 0x5e, 0x76, 0x9b, 0x7f, 0x08, 0xfd, 0xb2, 0x86, 0xef, 0x37, 0xfb, 0x0f, 0x87, 0xb1, 0xda, + 0x5c, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x10, 0x16, 0x7c, 0xe9, 0x47, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go new file mode 100644 index 000000000..dd2d831ef --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go @@ -0,0 +1,1411 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v2/trace.proto + +package cloudtrace + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + _ "google.golang.org/genproto/googleapis/api/annotations" + status "google.golang.org/genproto/googleapis/rpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. 
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 3, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Often, a trace contains a root span +// that describes the end-to-end latency, and one or more subspans for +// its sub-operations. A trace can also contain multiple root spans, +// or none at all. Spans do not need to be contiguous—there may be +// gaps or overlaps between spans in a trace. +type Span struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The [SPAN_ID] portion of the span's resource name. + SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The [SPAN_ID] of this span's parent span. If this is a root span, + // then this field must be empty. + ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation (up to 128 bytes). + // Stackdriver Trace displays the description in the + // {% dynamic print site_values.console_name %}. + // For example, the display name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name within an application and at the same call point. + // This makes it easier to correlate spans in different traces. 
+ DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. You can have up to 32 attributes per + // span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // Stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // A set of time events. You can have up to 32 annotations and 128 message + // events per span. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // Links associated with the span. You can have up to 128 links per Span. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. + Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // (Optional) Set this parameter to indicate whether this span is in + // the same process as its parent. If you do not set this parameter, + // Stackdriver Trace is unable to take advantage of this helpful + // information. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows implementation to detect missing child spans. 
+ ChildSpanCount *wrappers.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span) GetParentSpanId() string { + if m != nil { + return m.ParentSpanId + } + return "" +} + +func (m *Span) GetDisplayName() *TruncatableString { + if m != nil { + return m.DisplayName + } + return nil +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.Int32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// A set of attributes, each in the format `[KEY]:[VALUE]`. +type Span_Attributes struct { + // The set of attributes. Each attribute's key can be up to 128 bytes + // long. The value can be a string up to 256 bytes, an integer, or the + // Boolean values `true` and `false`. For example: + // + // "/instance_id": "my-instance" + // "/http/user_agent": "" + // "/http/request_bytes": 300 + // "abc.com/myattribute": true + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0 then all attributes are valid. 
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 0} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The timestamp indicating the time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
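+// Application code does not call these oneof helpers directly; to populate
+// the oneof, assign one of the generated wrapper types to Value. An
+// illustrative sketch:
+//
+//	ev := &Span_TimeEvent{
+//		Value: &Span_TimeEvent_Annotation_{Annotation: &Span_TimeEvent_Annotation{}},
+//	}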
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Annotation); err != nil { + return err + } + case *Span_TimeEvent_MessageEvent_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MessageEvent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) + } + return nil +} + +func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Span_TimeEvent) + switch tag { + case 2: // value.annotation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_Annotation) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_Annotation_{msg} + return true, err + case 3: // value.message_event + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Span_TimeEvent_MessageEvent) + err := b.DecodeMessage(msg) + m.Value = &Span_TimeEvent_MessageEvent_{msg} + return true, err + default: + return false, nil + } +} + +func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Span_TimeEvent) + // value + switch x := m.Value.(type) { + case *Span_TimeEvent_Annotation_: + s := proto.Size(x.Annotation) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Span_TimeEvent_MessageEvent_: + s := proto.Size(x.MessageEvent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. The maximum length for + // the description is 256 bytes. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. You can have up to 4 attributes + // per Annotation. 
+	Attributes           *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Span_TimeEvent_Annotation) Reset()         { *m = Span_TimeEvent_Annotation{} }
+func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_Annotation) ProtoMessage()    {}
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 0}
+}
+
+func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Size() int {
+	return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
+}
+func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+	if m != nil {
+		return m.Description
+	}
+	return nil
+}
+
+func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+	// Type of MessageEvent. Indicates whether the message was sent or
+	// received.
+	Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+	// An identifier for the MessageEvent's message that can be used to match
+	// SENT and RECEIVED MessageEvents. It is recommended to be unique within
+	// a Span.
+	Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+	// The number of uncompressed bytes sent or received.
+	UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"`
+	// The number of compressed bytes sent or received. If missing, it is
+	// assumed to be the same size as uncompressed.
+ CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() int64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 { + if m != nil { + return m.UncompressedSizeBytes + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 { + if m != nil { + return m.CompressedSizeBytes + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key:value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+	DroppedMessageEventsCount int32    `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+	XXX_NoUnkeyedLiteral      struct{} `json:"-"`
+	XXX_unrecognized          []byte   `json:"-"`
+	XXX_sizecache             int32    `json:"-"`
+}
+
+func (m *Span_TimeEvents) Reset()         { *m = Span_TimeEvents{} }
+func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvents) ProtoMessage()    {}
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+	return fileDescriptor_29869cc16dc8ce61, []int{0, 2}
+}
+
+func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
+}
+func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvents) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Span_TimeEvents.Merge(m, src)
+}
+func (m *Span_TimeEvents) XXX_Size() int {
+	return xxx_messageInfo_Span_TimeEvents.Size(m)
+}
+func (m *Span_TimeEvents) XXX_DiscardUnknown() {
+	xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
+
+func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+	if m != nil {
+		return m.TimeEvent
+	}
+	return nil
+}
+
+func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+	if m != nil {
+		return m.DroppedAnnotationsCount
+	}
+	return 0
+}
+
+func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+	if m != nil {
+		return m.DroppedMessageEventsCount
+	}
+	return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+	// The [TRACE_ID] for a trace within a project.
+	TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+	// The [SPAN_ID] for a span within a trace.
+	SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// The relationship of the current span relative to the linked span.
+	Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"`
+	// A set of attributes on the link. You can have up to 32 attributes per
+	// link.
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 3} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() string { + if m != nil { + return m.TraceId + } + return "" +} + +func (m *Span_Link) GetSpanId() string { + if m != nil { + return m.SpanId + } + return "" +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{0, 4} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute. +type AttributeValue struct { + // The type of the value. 
+ // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{1} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + } +} + +func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StringValue); err != nil { + return err + } + case *AttributeValue_IntValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AttributeValue.Value has unexpected type %T", x) + } + return nil +} + +func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributeValue) + switch tag { + case 1: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TruncatableString) + err := b.DecodeMessage(msg) + m.Value = &AttributeValue_StringValue{msg} + return true, err + case 2: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_IntValue{int64(x)} + return true, err + case 3: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &AttributeValue_BoolValue{x != 0} + return true, err + default: + return false, nil + } +} + +func _AttributeValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttributeValue) + // value + switch x := m.Value.(type) { + case *AttributeValue_StringValue: + s := proto.Size(x.StringValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AttributeValue_IntValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.IntValue)) + case *AttributeValue_BoolValue: + n += 1 // tag and wire + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A call stack appearing in a trace. +type StackTrace struct { + // Stack frames in this stack trace. A maximum of 128 frames are allowed. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both the + // `stackFrame` content and a value in `stackTraceHashId`. + // + // Subsequent spans within the same request can refer + // to that stack trace by only setting `stackTraceHashId`. 
+ StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() int64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// Represents a single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame (up to 1024 bytes). + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully-qualified (up to 1024 bytes). + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears (up to 256 + // bytes). + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code (up to 128 bytes). 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{2, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// Binary module. +type Module struct { + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so (up to 256 bytes). + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents (up to 128 bytes). + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{3} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// Represents a string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string is 500 + // bytes long and the limit of the string is 128 bytes, then + // `value` contains the first 128 bytes of the 500-byte string. 
+ // + // Truncation always happens on a UTF8 character boundary. If there + // are multi-byte characters in the string, then the length of the + // shortened string might be less than the size limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_29869cc16dc8ce61, []int{4} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "google.devtools.cloudtrace.v2.Span") + proto.RegisterType((*Span_Attributes)(nil), "google.devtools.cloudtrace.v2.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "google.devtools.cloudtrace.v2.Span.Link") + proto.RegisterType((*Span_Links)(nil), "google.devtools.cloudtrace.v2.Span.Links") + proto.RegisterType((*AttributeValue)(nil), "google.devtools.cloudtrace.v2.AttributeValue") + proto.RegisterType((*StackTrace)(nil), "google.devtools.cloudtrace.v2.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "google.devtools.cloudtrace.v2.Module") + 
proto.RegisterType((*TruncatableString)(nil), "google.devtools.cloudtrace.v2.TruncatableString") +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/trace.proto", fileDescriptor_29869cc16dc8ce61) +} + +var fileDescriptor_29869cc16dc8ce61 = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6, + 0x16, 0x36, 0xf5, 0xd6, 0x91, 0x6c, 0xc8, 0x13, 0x3b, 0x56, 0x94, 0xe4, 0x26, 0xd7, 0xf7, 0x16, + 0x70, 0x0a, 0x98, 0x0a, 0x94, 0xa4, 0x48, 0xd3, 0x02, 0xa9, 0x1f, 0x72, 0xa4, 0xc4, 0x56, 0x05, + 0x4a, 0x71, 0xd3, 0x34, 0x00, 0x31, 0x22, 0xc7, 0x32, 0x11, 0x8a, 0x24, 0x38, 0x43, 0x17, 0xce, + 0xae, 0xeb, 0xae, 0xbb, 0x29, 0x50, 0x74, 0x59, 0x20, 0xab, 0xfc, 0x8e, 0x2e, 0xba, 0xed, 0x7f, + 0xe9, 0xaa, 0x98, 0x07, 0x49, 0x29, 0x2f, 0xdb, 0xca, 0x6e, 0x66, 0xce, 0xf9, 0x3e, 0x9e, 0x33, + 0x73, 0x5e, 0x84, 0x5b, 0x63, 0xdf, 0x1f, 0xbb, 0xa4, 0x69, 0x93, 0x13, 0xe6, 0xfb, 0x2e, 0x6d, + 0x5a, 0xae, 0x1f, 0xd9, 0x2c, 0xc4, 0x16, 0x69, 0x9e, 0xb4, 0x9a, 0x62, 0xa1, 0x07, 0xa1, 0xcf, + 0x7c, 0x74, 0x5d, 0xaa, 0xea, 0xb1, 0xaa, 0x9e, 0xaa, 0xea, 0x27, 0xad, 0xc6, 0x35, 0xc5, 0x84, + 0x03, 0xa7, 0x89, 0x3d, 0xcf, 0x67, 0x98, 0x39, 0xbe, 0x47, 0x25, 0xb8, 0x71, 0x43, 0x49, 0xc5, + 0x6e, 0x14, 0x1d, 0x35, 0x99, 0x33, 0x21, 0x94, 0xe1, 0x49, 0xa0, 0x14, 0xfe, 0xf3, 0xb6, 0xc2, + 0x8f, 0x21, 0x0e, 0x02, 0x12, 0xc6, 0x04, 0x6b, 0x4a, 0x1e, 0x06, 0x56, 0x93, 0x32, 0xcc, 0x22, + 0x25, 0x58, 0xff, 0x07, 0x41, 0x6e, 0x10, 0x60, 0x0f, 0x21, 0xc8, 0x79, 0x78, 0x42, 0xea, 0xda, + 0x4d, 0x6d, 0xa3, 0x6c, 0x88, 0x35, 0x5a, 0x83, 0x22, 0x0d, 0xb0, 0x67, 0x3a, 0x76, 0x3d, 0x23, + 0x8e, 0x0b, 0x7c, 0xdb, 0xb5, 0xd1, 0xff, 0x61, 0x29, 0xc0, 0x21, 0xf1, 0x98, 0x19, 0xcb, 0xb3, + 0x42, 0x5e, 0x95, 0xa7, 0x03, 0xa9, 0x35, 0x80, 0xaa, 0xed, 0xd0, 0xc0, 0xc5, 0xa7, 0xa6, 0xa0, + 0xce, 0xdd, 0xd4, 0x36, 0x2a, 0xad, 0xdb, 0xfa, 0x47, 0x6f, 0x42, 0x1f, 0x86, 0x91, 0x67, 0x61, + 0x86, 0x47, 0x2e, 0x19, 0xb0, 0xd0, 0xf1, 0xc6, 0x46, 0x45, 0xb1, 0xf4, 0xb8, 0x4d, 0x5f, 0x02, + 0x50, 0x86, 0x43, 0x66, 0xf2, 0x2b, 0xa8, 0xe7, 0x05, 0x65, 0x23, 0xa6, 0x8c, 0xdd, 0xd7, 0x87, + 0xf1, 0xfd, 0x18, 0x65, 0xa1, 0xcd, 0xf7, 0xe8, 0x1e, 0x94, 0x88, 0x67, 0x4b, 0x60, 0xe1, 0x4c, + 0x60, 0x91, 0x78, 0xb6, 0x80, 0xf5, 0x00, 0x30, 0x63, 0xa1, 0x33, 0x8a, 0x18, 0xa1, 0xf5, 0xa2, + 0x00, 0xea, 0x67, 0x38, 0xc1, 0x6f, 0x40, 0xdf, 0x4a, 0x50, 0xc6, 0x14, 0x03, 0x7a, 0x0c, 0x15, + 0xca, 0xb0, 0xf5, 0xd2, 0x14, 0xda, 0xf5, 0x92, 0x20, 0xbc, 0x75, 0x16, 0x21, 0x47, 0x0c, 0xf9, + 0xce, 0x00, 0x9a, 0xac, 0xd1, 0xb7, 0x50, 0xe1, 0xee, 0x98, 0xe4, 0x84, 0x78, 0x8c, 0xd6, 0xcb, + 0xe7, 0x37, 0x8e, 0xbb, 0xd6, 0x16, 0x28, 0x03, 0x58, 0xb2, 0x46, 0x0f, 0x21, 0xef, 0x3a, 0xde, + 0x4b, 0x5a, 0x87, 0xf3, 0x99, 0xc5, 0xa9, 0xf6, 0x39, 0xc0, 0x90, 0x38, 0xf4, 0x39, 0x14, 0x64, + 0x80, 0xd5, 0x2b, 0x82, 0x01, 0xc5, 0x0c, 0x61, 0x60, 0x71, 0x2f, 0x58, 0x44, 0x0d, 0xa5, 0x81, + 0x9e, 0xc1, 0x55, 0x8a, 0x27, 0xc4, 0x0c, 0x42, 0xdf, 0x22, 0x94, 0x9a, 0x98, 0x9a, 0x53, 0x61, + 0x55, 0xaf, 0x7e, 0xe0, 0x8d, 0xb6, 0x7d, 0xdf, 0x3d, 0xc4, 0x6e, 0x44, 0x8c, 0x35, 0x0e, 0xef, + 0x4b, 0xf4, 0x16, 0xed, 0x27, 0xc1, 0x87, 0xda, 0x50, 0xb3, 0x8e, 0x1d, 0xd7, 0x96, 0xf1, 0x69, + 0xf9, 0x91, 0xc7, 0xea, 0x8b, 0x82, 0xee, 0xea, 0x3b, 0x74, 0x5d, 0x8f, 0xdd, 0x69, 0x49, 0xbe, + 0x25, 0x01, 0xe2, 0x0c, 0x3b, 0x1c, 0xd2, 0xf8, 0x2d, 0x03, 0x90, 0xbe, 0x22, 0x22, 0xb0, 0x98, + 0xbc, 0xa3, 0x39, 0xc1, 0x41, 0x5d, 0xbb, 0x99, 0xdd, 0xa8, 0xb4, 0xbe, 0xb9, 0x58, 0x30, 0xa4, + 
0xcb, 0x03, 0x1c, 0xb4, 0x3d, 0x16, 0x9e, 0x1a, 0x55, 0x3c, 0x75, 0x84, 0xee, 0x43, 0xdd, 0x0e, + 0xfd, 0x20, 0x20, 0xb6, 0x99, 0x86, 0x8d, 0x72, 0x82, 0xe7, 0x61, 0xde, 0xb8, 0xac, 0xe4, 0x29, + 0xa9, 0xb4, 0xd7, 0x83, 0xe5, 0x77, 0xc8, 0x51, 0x0d, 0xb2, 0x2f, 0xc9, 0xa9, 0x4a, 0x6c, 0xbe, + 0x44, 0x3b, 0x90, 0x3f, 0xe1, 0xfe, 0x0a, 0xb6, 0x4a, 0x6b, 0xf3, 0x0c, 0xfb, 0x13, 0x4a, 0x79, + 0x49, 0x12, 0xfb, 0x20, 0x73, 0x5f, 0x6b, 0xfc, 0x95, 0x87, 0x72, 0x12, 0x48, 0x48, 0x87, 0x9c, + 0xc8, 0x2d, 0xed, 0xcc, 0xdc, 0x12, 0x7a, 0xe8, 0x39, 0x40, 0x5a, 0xea, 0x94, 0x2d, 0xf7, 0x2f, + 0x14, 0xbb, 0xfa, 0x56, 0x82, 0xef, 0x2c, 0x18, 0x53, 0x6c, 0x08, 0xc3, 0xe2, 0x84, 0x50, 0x8a, + 0xc7, 0x2a, 0x37, 0x44, 0x81, 0xaa, 0xb4, 0x1e, 0x5c, 0x8c, 0xfe, 0x40, 0x52, 0x88, 0x4d, 0x67, + 0xc1, 0xa8, 0x4e, 0xa6, 0xf6, 0x8d, 0x37, 0x1a, 0x40, 0xfa, 0x7d, 0x64, 0x40, 0xc5, 0x26, 0xd4, + 0x0a, 0x9d, 0x40, 0xb8, 0xa3, 0xcd, 0x5d, 0xec, 0x52, 0x92, 0xb7, 0x4a, 0x4f, 0xe6, 0x53, 0x4b, + 0x4f, 0xe3, 0x97, 0x0c, 0x54, 0xa7, 0x7d, 0x42, 0x03, 0xc8, 0xb1, 0xd3, 0x40, 0x3e, 0xd9, 0x52, + 0xeb, 0xe1, 0xfc, 0xb7, 0xa3, 0x0f, 0x4f, 0x03, 0x62, 0x08, 0x32, 0xb4, 0x04, 0x19, 0xd5, 0x31, + 0xb2, 0x46, 0xc6, 0xb1, 0xd1, 0x17, 0xb0, 0x16, 0x79, 0x96, 0x3f, 0x09, 0x42, 0x42, 0x29, 0xb1, + 0x4d, 0xea, 0xbc, 0x22, 0xe6, 0xe8, 0x94, 0xbb, 0x94, 0x15, 0x4a, 0xab, 0xd3, 0xe2, 0x81, 0xf3, + 0x8a, 0x6c, 0x73, 0x21, 0x6a, 0xc1, 0xea, 0xfb, 0x51, 0x39, 0x81, 0xba, 0xf4, 0x1e, 0xcc, 0xfa, + 0x5d, 0xc8, 0x71, 0x4b, 0xd0, 0x0a, 0xd4, 0x86, 0xdf, 0xf7, 0xdb, 0xe6, 0xd3, 0xde, 0xa0, 0xdf, + 0xde, 0xe9, 0xee, 0x75, 0xdb, 0xbb, 0xb5, 0x05, 0x54, 0x82, 0xdc, 0xa0, 0xdd, 0x1b, 0xd6, 0x34, + 0x54, 0x85, 0x92, 0xd1, 0xde, 0x69, 0x77, 0x0f, 0xdb, 0xbb, 0xb5, 0xcc, 0x76, 0x51, 0x25, 0x44, + 0xe3, 0x6f, 0x0d, 0x20, 0xad, 0x8c, 0x68, 0x1f, 0x20, 0x2d, 0xaf, 0x2a, 0xdb, 0x37, 0x2f, 0x74, + 0x49, 0x46, 0x39, 0x29, 0xae, 0xe8, 0x01, 0x5c, 0x49, 0xf2, 0x3a, 0x6d, 0xf1, 0x33, 0x89, 0xbd, + 0x16, 0x27, 0x76, 0x2a, 0x17, 0x99, 0x8d, 0x1e, 0xc2, 0xb5, 0x18, 0x3b, 0x13, 0xd7, 0x31, 0x3c, + 0x2b, 0xe0, 0x31, 0xff, 0xf4, 0xcb, 0xa8, 0xd2, 0xf0, 0x6b, 0x06, 0x72, 0xbc, 0x50, 0xa3, 0x2b, + 0x50, 0x12, 0xb6, 0xf2, 0xae, 0x2d, 0x6b, 0x42, 0x51, 0xec, 0xbb, 0xf6, 0x87, 0xfb, 0xfd, 0x96, + 0x0a, 0x93, 0xac, 0x08, 0x93, 0xcd, 0xf3, 0x36, 0x85, 0xe9, 0xa0, 0x98, 0x0d, 0xe5, 0xdc, 0xa7, + 0x86, 0xf2, 0xfa, 0x93, 0x8f, 0x3e, 0xf4, 0x2a, 0x2c, 0xef, 0x74, 0xba, 0xfb, 0xbb, 0xe6, 0x7e, + 0xb7, 0xf7, 0xa4, 0xbd, 0x6b, 0x0e, 0xfa, 0x5b, 0xbd, 0x9a, 0x86, 0x2e, 0x03, 0xea, 0x6f, 0x19, + 0xed, 0xde, 0x70, 0xe6, 0x3c, 0xd3, 0x88, 0x20, 0x2f, 0x9a, 0x18, 0xfa, 0x1a, 0x72, 0xbc, 0x8d, + 0xa9, 0xa7, 0xde, 0x38, 0xaf, 0xa3, 0x86, 0x40, 0x21, 0x1d, 0x2e, 0xc5, 0x8f, 0x24, 0x9a, 0xe1, + 0xcc, 0xd3, 0x2e, 0x2b, 0x91, 0xf8, 0x90, 0x78, 0x93, 0xf5, 0x37, 0x1a, 0x2c, 0xcd, 0x16, 0x57, + 0xf4, 0x14, 0xaa, 0x54, 0x14, 0x02, 0x53, 0x56, 0xe8, 0x39, 0xcb, 0x48, 0x67, 0xc1, 0xa8, 0x48, + 0x1e, 0x49, 0x7b, 0x1d, 0xca, 0x8e, 0xc7, 0xcc, 0xb4, 0xea, 0x67, 0x3b, 0x0b, 0x46, 0xc9, 0xf1, + 0x98, 0x14, 0xdf, 0x00, 0x18, 0xf9, 0xbe, 0xab, 0xe4, 0xfc, 0x95, 0x4b, 0x9d, 0x05, 0xa3, 0x3c, + 0x8a, 0x1b, 0x6d, 0x92, 0x20, 0xeb, 0x7f, 0x14, 0x00, 0xd2, 0x59, 0x04, 0x3d, 0xe3, 0xe6, 0xf2, + 0x59, 0xe6, 0x28, 0xc4, 0x13, 0x42, 0x95, 0xb9, 0xf7, 0xce, 0x3d, 0xcc, 0xc8, 0xe5, 0x9e, 0x00, + 0x1b, 0x72, 0x2c, 0x92, 0x1b, 0xb4, 0x09, 0x97, 0xa6, 0xa6, 0x24, 0xf3, 0x18, 0xd3, 0x63, 0x33, + 0xa9, 0x2a, 0xb5, 0x74, 0x04, 0xea, 0x60, 0x7a, 0xdc, 0xb5, 0x1b, 0x3f, 0xe5, 0x94, 0x5d, 0x02, + 0x8e, 0x9e, 0xc2, 0xe2, 
0x51, 0xe4, 0x59, 0x3c, 0x81, 0xcc, 0x64, 0xac, 0x9d, 0xa7, 0x1c, 0x57, + 0x63, 0x1a, 0x31, 0x7c, 0x1e, 0xc1, 0x65, 0x3f, 0x74, 0xc6, 0x8e, 0x87, 0x5d, 0x73, 0x96, 0x3f, + 0x33, 0x27, 0xff, 0x4a, 0xcc, 0xb7, 0x37, 0xfd, 0x9d, 0x03, 0x28, 0x1f, 0x39, 0x2e, 0x91, 0xd4, + 0xd9, 0x39, 0xa9, 0x4b, 0x9c, 0x42, 0xd0, 0xdd, 0x80, 0x8a, 0xeb, 0x78, 0xc4, 0xf4, 0xa2, 0xc9, + 0x88, 0x84, 0xaa, 0x7c, 0x02, 0x3f, 0xea, 0x89, 0x13, 0xf4, 0x3f, 0x58, 0xb4, 0x7c, 0x37, 0x9a, + 0x78, 0xb1, 0x4a, 0x5e, 0xa8, 0x54, 0xe5, 0xa1, 0x52, 0xda, 0x83, 0x8a, 0xeb, 0x63, 0xdb, 0x9c, + 0xf8, 0x76, 0xe4, 0xc6, 0x13, 0xf4, 0x67, 0x67, 0x98, 0x75, 0x20, 0x94, 0x0d, 0xe0, 0x48, 0xb9, + 0x46, 0xdf, 0xc1, 0x12, 0xf5, 0xa3, 0xd0, 0x22, 0xe6, 0x09, 0x09, 0x29, 0xef, 0x95, 0xc5, 0x39, + 0x3d, 0x5c, 0x94, 0x3c, 0x87, 0x92, 0xa6, 0xf1, 0xb3, 0x06, 0x95, 0xa9, 0x78, 0x42, 0x8f, 0x21, + 0x2f, 0xc2, 0x52, 0x65, 0xf3, 0xdd, 0x79, 0xa2, 0xd2, 0x90, 0x14, 0xe8, 0x36, 0xac, 0xc4, 0xa9, + 0x2d, 0x43, 0x7d, 0x26, 0xb7, 0x91, 0x92, 0xc9, 0x0f, 0xcb, 0xe4, 0xfe, 0x5d, 0x83, 0x82, 0xf2, + 0xb8, 0x03, 0x05, 0x75, 0x69, 0xf3, 0x86, 0xa1, 0xc2, 0xa3, 0x27, 0x50, 0x1a, 0x45, 0x7c, 0xae, + 0x55, 0xa9, 0x30, 0x0f, 0x57, 0x51, 0x30, 0x74, 0xed, 0xf5, 0x1f, 0x60, 0xf9, 0x1d, 0x29, 0x5a, + 0x89, 0x67, 0x43, 0xd9, 0x1b, 0xe4, 0x86, 0xbb, 0xcf, 0xa4, 0x2a, 0xb1, 0x45, 0x13, 0x9e, 0x75, + 0x3f, 0x91, 0xf1, 0x26, 0x2c, 0xdc, 0xdf, 0x7e, 0xad, 0xc1, 0x7f, 0x2d, 0x7f, 0xf2, 0x71, 0xeb, + 0xb6, 0x41, 0xdc, 0x77, 0x9f, 0x4f, 0x88, 0x7d, 0xed, 0xf9, 0x23, 0xa5, 0x3c, 0xf6, 0x5d, 0xec, + 0x8d, 0x75, 0x3f, 0x1c, 0x37, 0xc7, 0xc4, 0x13, 0xf3, 0x63, 0x53, 0x8a, 0x70, 0xe0, 0xd0, 0x0f, + 0xfc, 0x6d, 0x7f, 0x95, 0xee, 0x5e, 0x67, 0x56, 0x1f, 0x49, 0xa6, 0x1d, 0x7e, 0xa6, 0xcb, 0x47, + 0x3d, 0x6c, 0xfd, 0x19, 0x9f, 0xbf, 0x10, 0xe7, 0x2f, 0xc4, 0xf9, 0x8b, 0xc3, 0xd6, 0xa8, 0x20, + 0xbe, 0x71, 0xe7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x3c, 0x8a, 0x77, 0xd0, 0x0f, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go new file mode 100644 index 000000000..a684ff1bc --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/devtools/cloudtrace/v2/tracing.proto + +package cloudtrace + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + _ "github.com/golang/protobuf/ptypes/timestamp" + context "golang.org/x/net/context" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message for the `BatchWriteSpans` method. +type BatchWriteSpansRequest struct { + // Required. The name of the project where the spans belong. The format is + // `projects/[PROJECT_ID]`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A list of new spans. 
The span names must not match existing + // spans, or the results are undefined. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchWriteSpansRequest) Reset() { *m = BatchWriteSpansRequest{} } +func (m *BatchWriteSpansRequest) String() string { return proto.CompactTextString(m) } +func (*BatchWriteSpansRequest) ProtoMessage() {} +func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d1f9b588db05fdc6, []int{0} +} + +func (m *BatchWriteSpansRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchWriteSpansRequest.Unmarshal(m, b) +} +func (m *BatchWriteSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchWriteSpansRequest.Marshal(b, m, deterministic) +} +func (m *BatchWriteSpansRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchWriteSpansRequest.Merge(m, src) +} +func (m *BatchWriteSpansRequest) XXX_Size() int { + return xxx_messageInfo_BatchWriteSpansRequest.Size(m) +} +func (m *BatchWriteSpansRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchWriteSpansRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchWriteSpansRequest proto.InternalMessageInfo + +func (m *BatchWriteSpansRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BatchWriteSpansRequest) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +func init() { + proto.RegisterType((*BatchWriteSpansRequest)(nil), "google.devtools.cloudtrace.v2.BatchWriteSpansRequest") +} + +func init() { + proto.RegisterFile("google/devtools/cloudtrace/v2/tracing.proto", fileDescriptor_d1f9b588db05fdc6) +} + +var fileDescriptor_d1f9b588db05fdc6 = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xdd, 0x6a, 0xdb, 0x30, + 0x14, 0x46, 0xde, 0x0f, 0x4c, 0x1b, 0x0c, 0x04, 0x0b, 0xc1, 0xdb, 0x58, 0xe6, 0x0d, 0x96, 0x64, + 0x43, 0x02, 0x8f, 0x5d, 0x2c, 0x63, 0x37, 0x09, 0x23, 0xb7, 0x21, 0x19, 0x19, 0x8c, 0xdc, 0x28, + 0x8e, 0xa6, 0x69, 0xd8, 0x92, 0x67, 0x29, 0x86, 0x52, 0x7a, 0xd3, 0x9b, 0x3e, 0x40, 0xfb, 0x14, + 0xa5, 0xd0, 0xf7, 0xe8, 0x6d, 0x5f, 0xa1, 0x0f, 0x52, 0x24, 0xd9, 0x0d, 0x84, 0x34, 0xc9, 0x9d, + 0xce, 0x39, 0xdf, 0xf9, 0xce, 0xf7, 0x7d, 0x36, 0xfc, 0xc8, 0x95, 0xe2, 0x29, 0x23, 0x0b, 0x56, + 0x1a, 0xa5, 0x52, 0x4d, 0x92, 0x54, 0x2d, 0x17, 0xa6, 0xa0, 0x09, 0x23, 0x65, 0x4c, 0xec, 0x43, + 0x48, 0x8e, 0xf3, 0x42, 0x19, 0x85, 0x5e, 0x7b, 0x30, 0xae, 0xc1, 0x78, 0x05, 0xc6, 0x65, 0x1c, + 0xbe, 0xaa, 0xb8, 0x68, 0x2e, 0x08, 0x95, 0x52, 0x19, 0x6a, 0x84, 0x92, 0xda, 0x2f, 0x87, 0x9d, + 0xdd, 0x97, 0x58, 0x05, 0x7d, 0x59, 0x41, 0x5d, 0x35, 0x5f, 0xfe, 0x21, 0x2c, 0xcb, 0xcd, 0x41, + 0x35, 0x7c, 0xb3, 0x3e, 0x34, 0x22, 0x63, 0xda, 0xd0, 0x2c, 0xf7, 0x80, 0x88, 0xc3, 0x46, 0x9f, + 0x9a, 0xe4, 0xef, 0xaf, 0x42, 0x18, 0x36, 0xc9, 0xa9, 0xd4, 0x63, 0xf6, 0x7f, 0xc9, 0xb4, 0x41, + 0x08, 0x3e, 0x94, 0x34, 0x63, 0x4d, 0xd0, 0x02, 0xed, 0x27, 0x63, 0xf7, 0x46, 0x5f, 0xe1, 0x23, + 0x6d, 0x31, 0xcd, 0xa0, 0xf5, 0xa0, 0xfd, 0x34, 0x7e, 0x87, 0xb7, 0x7a, 0xc4, 0x96, 0x6f, 0xec, + 0x37, 0xe2, 0xcb, 0x00, 0x3e, 0xfb, 0x69, 0x07, 0x13, 0x56, 0x94, 0x22, 0x61, 0xe8, 0x0c, 0xc0, + 0xe7, 0x6b, 0xa7, 0xd1, 0x97, 0x1d, 0x84, 0x9b, 0xa5, 0x86, 0x8d, 0x7a, 0xad, 0xb6, 0x89, 0x7f, + 0xd8, 0x0c, 0xa2, 0xf8, 0xf8, 0xfa, 0xe6, 0x34, 0xf8, 0x14, 0x7d, 0xb0, 
0x99, 0x1d, 0x5a, 0x07, + 0xdf, 0xf3, 0x42, 0xfd, 0x63, 0x89, 0xd1, 0xa4, 0x7b, 0xe4, 0x53, 0xd4, 0xbd, 0xf9, 0x1d, 0x69, + 0x0f, 0x74, 0xd1, 0x09, 0x80, 0x70, 0x50, 0x30, 0xea, 0x4f, 0xa0, 0x7d, 0x2c, 0x86, 0xfb, 0x80, + 0x22, 0xe2, 0xc4, 0x74, 0xa2, 0xf7, 0x9b, 0xc4, 0x54, 0x5a, 0xac, 0x2a, 0x17, 0x57, 0x0f, 0x74, + 0xfb, 0x17, 0x00, 0xbe, 0x4d, 0x54, 0xb6, 0x9d, 0xbb, 0xef, 0x42, 0x15, 0x92, 0x8f, 0xac, 0xf5, + 0x11, 0xf8, 0x3d, 0xac, 0xe0, 0x5c, 0xa5, 0x54, 0x72, 0xac, 0x0a, 0x4e, 0x38, 0x93, 0x2e, 0x18, + 0xe2, 0x47, 0x34, 0x17, 0xfa, 0x9e, 0x1f, 0xeb, 0xdb, 0xaa, 0x3a, 0x0f, 0x5e, 0x0c, 0x3d, 0xd3, + 0xc0, 0xf6, 0xb0, 0xfb, 0x76, 0x78, 0x1a, 0x5f, 0xd5, 0xfd, 0x99, 0xeb, 0xcf, 0x5c, 0x7f, 0x36, + 0x8d, 0xe7, 0x8f, 0xdd, 0x8d, 0xcf, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x94, 0x51, 0x1d, + 0x25, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Creates a new span. + CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) { + out := new(Span) + err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // Sends new spans to new or existing traces. You cannot update + // existing spans. + BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*empty.Empty, error) + // Creates a new span. 
+ CreateSpan(context.Context, *Span) (*Span, error) +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchWriteSpansRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Span) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).CreateSpan(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.devtools.cloudtrace.v2.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "BatchWriteSpans", + Handler: _TraceService_BatchWriteSpans_Handler, + }, + { + MethodName: "CreateSpan", + Handler: _TraceService_CreateSpan_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/devtools/cloudtrace/v2/tracing.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go new file mode 100644 index 000000000..f30ec3953 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go @@ -0,0 +1,966 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Operators for combining conditions. +type AlertPolicy_ConditionCombinerType int32 + +const ( + // An unspecified combiner. + AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0 + // Combine conditions using the logical `AND` operator. An + // incident is created only if all conditions are met + // simultaneously. 
This combiner is satisfied if all conditions are + // met, even if they are met on completely different resources. + AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1 + // Combine conditions using the logical `OR` operator. An incident + // is created if any of the listed conditions is met. + AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2 + // Combine conditions using logical `AND` operator, but unlike the regular + // `AND` option, an incident is created only if all conditions are met + // simultaneously on at least one resource. + AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3 +) + +var AlertPolicy_ConditionCombinerType_name = map[int32]string{ + 0: "COMBINE_UNSPECIFIED", + 1: "AND", + 2: "OR", + 3: "AND_WITH_MATCHING_RESOURCE", +} + +var AlertPolicy_ConditionCombinerType_value = map[string]int32{ + "COMBINE_UNSPECIFIED": 0, + "AND": 1, + "OR": 2, + "AND_WITH_MATCHING_RESOURCE": 3, +} + +func (x AlertPolicy_ConditionCombinerType) String() string { + return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x)) +} + +func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 0} +} + +// A description of the conditions under which some aspect of your system is +// considered to be "unhealthy" and the ways to notify people or services about +// this state. For an overview of alert policies, see +// [Introduction to Alerting](/monitoring/alerts/). +type AlertPolicy struct { + // Required if the policy exists. The resource name for this policy. The + // syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy + // is created. When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the alerting policy passed as + // part of the request. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the policy in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple policies in the same project. The name is + // limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Documentation that is included with notifications and incidents related to + // this policy. Best practice is for the documentation to include information + // to help responders understand, mitigate, escalate, and correct the + // underlying problems detected by the alerting policy. Notification channels + // that have limited capacity might not show this documentation. + Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"` + // User-supplied key/value data to be used for organizing and + // identifying the `AlertPolicy` objects. + // + // The field can contain up to 64 entries. Each key and value is limited to + // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and + // values can contain only lowercase letters, numerals, underscores, and + // dashes. Keys must begin with a letter. 
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A list of conditions for the policy. The conditions are combined by AND or + // OR according to the `combiner` field. If the combined conditions evaluate + // to true, then an incident is created. A policy can have from one to six + // conditions. + Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"` + // How to combine the results of multiple conditions + // to determine if an incident should be opened. + Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"` + // Whether or not the policy is enabled. On write, the default interpretation + // if unset is that the policy is enabled. On read, clients should not make + // any assumption about the state if it has not been populated. The + // field should always be populated on List and Get operations, unless + // a field projection has been specified that strips it out. + Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"` + // Identifies the notification channels to which notifications should be sent + // when incidents are opened or closed or when new violations occur on + // an already opened incident. Each element of this array corresponds to + // the `name` field in each of the + // [`NotificationChannel`][google.monitoring.v3.NotificationChannel] + // objects that are returned from the [`ListNotificationChannels`] + // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels] + // method. The syntax of the entries in this field is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // A read-only record of the creation of the alerting policy. If provided + // in a call to create or update, this field will be ignored. + CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"` + // A read-only record of the most recent change to the alerting policy. If + // provided in a call to create or update, this field will be ignored. 
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy) Reset() { *m = AlertPolicy{} } +func (m *AlertPolicy) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy) ProtoMessage() {} +func (*AlertPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0} +} + +func (m *AlertPolicy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy.Unmarshal(m, b) +} +func (m *AlertPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy.Marshal(b, m, deterministic) +} +func (m *AlertPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy.Merge(m, src) +} +func (m *AlertPolicy) XXX_Size() int { + return xxx_messageInfo_AlertPolicy.Size(m) +} +func (m *AlertPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy proto.InternalMessageInfo + +func (m *AlertPolicy) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation { + if m != nil { + return m.Documentation + } + return nil +} + +func (m *AlertPolicy) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition { + if m != nil { + return m.Conditions + } + return nil +} + +func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType { + if m != nil { + return m.Combiner + } + return AlertPolicy_COMBINE_UNSPECIFIED +} + +func (m *AlertPolicy) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func (m *AlertPolicy) GetNotificationChannels() []string { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *AlertPolicy) GetCreationRecord() *MutationRecord { + if m != nil { + return m.CreationRecord + } + return nil +} + +func (m *AlertPolicy) GetMutationRecord() *MutationRecord { + if m != nil { + return m.MutationRecord + } + return nil +} + +// A content string and a MIME type that describes the content string's +// format. +type AlertPolicy_Documentation struct { + // The text of the documentation, interpreted according to `mime_type`. + // The content may not exceed 8,192 Unicode characters and may not exceed + // more than 10,240 bytes when encoded in UTF-8 format, whichever is + // smaller. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // The format of the `content` field. Presently, only the value + // `"text/markdown"` is supported. See + // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information. 
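+ //
+ // A minimal sketch of a fully populated message, assuming it is
+ // constructed directly in Go (the content string is hypothetical):
+ //
+ //     &AlertPolicy_Documentation{
+ //         Content:  "CPU usage is above the threshold; see the runbook.",
+ //         MimeType: "text/markdown",
+ //     }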
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} } +func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Documentation) ProtoMessage() {} +func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 0} +} + +func (m *AlertPolicy_Documentation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Documentation.Unmarshal(m, b) +} +func (m *AlertPolicy_Documentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Documentation.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Documentation) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Documentation.Merge(m, src) +} +func (m *AlertPolicy_Documentation) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Documentation.Size(m) +} +func (m *AlertPolicy_Documentation) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Documentation.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Documentation proto.InternalMessageInfo + +func (m *AlertPolicy_Documentation) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *AlertPolicy_Documentation) GetMimeType() string { + if m != nil { + return m.MimeType + } + return "" +} + +// A condition is a true/false test that determines when an alerting policy +// should open an incident. If a condition evaluates to true, it signifies +// that something is wrong. +type AlertPolicy_Condition struct { + // Required if the condition exists. The unique resource name for this + // condition. Its syntax is: + // + // projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] + // + // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the + // condition is created as part of a new or updated alerting policy. + // + // When calling the + // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy] + // method, do not include the `name` field in the conditions of the + // requested alerting policy. Stackdriver Monitoring creates the + // condition identifiers and includes them in the new policy. + // + // When calling the + // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy] + // method to update a policy, including a condition `name` causes the + // existing condition to be updated. Conditions without names are added to + // the updated policy. Existing conditions are deleted if they are not + // updated. + // + // Best practice is to preserve `[CONDITION_ID]` if you make only small + // changes, such as those to condition thresholds, durations, or trigger + // values. Otherwise, treat the change as a new condition and let the + // existing condition be deleted. + Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"` + // A short name or phrase used to identify the condition in dashboards, + // notifications, and incidents. To avoid confusion, don't use the same + // display name for multiple conditions in the same policy. + DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Only one of the following condition types will be specified. 
+ // + // Types that are valid to be assigned to Condition: + // *AlertPolicy_Condition_ConditionThreshold + // *AlertPolicy_Condition_ConditionAbsent + Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} } +func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition) ProtoMessage() {} +func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1} +} + +func (m *AlertPolicy_Condition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition.Merge(m, src) +} +func (m *AlertPolicy_Condition) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition.Size(m) +} +func (m *AlertPolicy_Condition) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition proto.InternalMessageInfo + +func (m *AlertPolicy_Condition) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AlertPolicy_Condition) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isAlertPolicy_Condition_Condition interface { + isAlertPolicy_Condition_Condition() +} + +type AlertPolicy_Condition_ConditionThreshold struct { + ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"` +} + +type AlertPolicy_Condition_ConditionAbsent struct { + ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {} + +func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {} + +func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok { + return x.ConditionThreshold + } + return nil +} + +func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence { + if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok { + return x.ConditionAbsent + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
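+ // As a sketch of the standard proto3 wire format these helpers handle
+ // (stated for orientation, not as new behavior): `condition_threshold`
+ // (field 1, wire type 2 = length-delimited) is written as the varint
+ // key 1<<3|2 = 0x0a followed by the length-prefixed message, and
+ // `condition_absent` (field 2) as the key 2<<3|2 = 0x12.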
+func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{ + (*AlertPolicy_Condition_ConditionThreshold)(nil), + (*AlertPolicy_Condition_ConditionAbsent)(nil), + } +} + +func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionThreshold); err != nil { + return err + } + case *AlertPolicy_Condition_ConditionAbsent: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ConditionAbsent); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x) + } + return nil +} + +func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AlertPolicy_Condition) + switch tag { + case 1: // condition.condition_threshold + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricThreshold) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg} + return true, err + case 2: // condition.condition_absent + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AlertPolicy_Condition_MetricAbsence) + err := b.DecodeMessage(msg) + m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg} + return true, err + default: + return false, nil + } +} + +func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AlertPolicy_Condition) + // condition + switch x := m.Condition.(type) { + case *AlertPolicy_Condition_ConditionThreshold: + s := proto.Size(x.ConditionThreshold) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *AlertPolicy_Condition_ConditionAbsent: + s := proto.Size(x.ConditionAbsent) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies how many time series must fail a predicate to trigger a +// condition. If not specified, then a `{count: 1}` trigger is used. +type AlertPolicy_Condition_Trigger struct { + // A type of trigger. 
+ // + // Types that are valid to be assigned to Type: + // *AlertPolicy_Condition_Trigger_Count + // *AlertPolicy_Condition_Trigger_Percent + Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} } +func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_Trigger) ProtoMessage() {} +func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 0} +} + +func (m *AlertPolicy_Condition_Trigger) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_Trigger.Merge(m, src) +} +func (m *AlertPolicy_Condition_Trigger) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_Trigger.Size(m) +} +func (m *AlertPolicy_Condition_Trigger) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_Trigger.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_Trigger proto.InternalMessageInfo + +type isAlertPolicy_Condition_Trigger_Type interface { + isAlertPolicy_Condition_Trigger_Type() +} + +type AlertPolicy_Condition_Trigger_Count struct { + Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"` +} + +type AlertPolicy_Condition_Trigger_Percent struct { + Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"` +} + +func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {} + +func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {} + +func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AlertPolicy_Condition_Trigger) GetCount() int32 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok { + return x.Count + } + return 0 +} + +func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 { + if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok { + return x.Percent + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
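+ // As a sketch of the standard proto3 wire format these helpers handle:
+ // a `count` value (field 1, wire type 0 = varint) is written as the
+ // key 1<<3|0 = 0x08 followed by the varint value, and `percent`
+ // (field 2, wire type 1 = 64-bit) as the key 2<<3|1 = 0x11 followed by
+ // eight little-endian bytes, which is why the sizer adds a fixed 8.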
+func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{
+ (*AlertPolicy_Condition_Trigger_Count)(nil),
+ (*AlertPolicy_Condition_Trigger_Percent)(nil),
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Count))
+ case *AlertPolicy_Condition_Trigger_Percent:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.Percent))
+ case nil:
+ default:
+ return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ switch tag {
+ case 1: // type.count
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)}
+ return true, err
+ case 2: // type.percent
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ s := proto.SizeVarint(uint64(x.Count))
+ n += 1 // tag and wire
+ n += s
+ case *AlertPolicy_Condition_Trigger_Percent:
+ n += 1 // tag and wire
+ n += 8
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the
+ // [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field. + Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"` + // A [filter](/monitoring/api/v3/filters) that identifies a time + // series that should be used as the denominator of a ratio that will be + // compared with the threshold. If a `denominator_filter` is specified, + // the time series specified by the `filter` field will be used as the + // numerator. + // + // The filter is similar to the one that is specified in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that + // call is useful to verify the time series that will be retrieved / + // processed) and must specify the metric type and optionally may contain + // restrictions on resource type, resource labels, and metric labels. + // This field may not exceed 2048 Unicode characters in length. + DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"` + // Specifies the alignment of data points in individual time series + // selected by `denominatorFilter` as + // well as how to combine the retrieved time series together (such as + // when aggregating multiple streams on each resource to a single + // stream for each resource or when aggregating streams across all + // members of a group of resources). + // + // When computing ratios, the `aggregations` and + // `denominator_aggregations` fields must use the same alignment period + // and produce time series that have the same periodicity and labels. + // + // This field is similar to the one in the + // [`MetricService.ListTimeSeries` + // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It + // is advisable to use the `ListTimeSeries` method when debugging this + // field. + DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"` + // The comparison to apply between the time series (indicated by `filter` + // and `aggregation`) and the threshold (indicated by `threshold_value`). + // The comparison is applied on each time series, with the time series + // on the left-hand side and the threshold on the right-hand side. + // + // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently. + Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"` + // A value against which to compare the time series. + ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"` + // The amount of time that a time series must violate the + // threshold to be considered failing. Currently, only values + // that are a multiple of a minute--e.g., 0, 60, 120, or 300 + // seconds--are supported. If an invalid value is given, an + // error will be returned. When choosing a duration, it is useful to + // keep in mind the frequency of the underlying time series data + // (which may also be affected by any alignments specified in the + // `aggregations` field); a good duration is long enough so that a single + // outlier does not generate spurious alerts, but short enough that + // unhealthy states are detected and alerted on quickly. 
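+ //
+ // As a rough illustration (an inference from the text above, not
+ // normative guidance): with a 60-second alignment period, a 300-second
+ // duration means roughly five consecutive aligned points must violate
+ // the threshold before the condition fires.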
+ Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"` + // The number/percent of time series for which the comparison must hold + // in order for the condition to trigger. If unspecified, then the + // condition will trigger if the comparison is true for any of the + // time series that have been identified by `filter` and `aggregations`, + // or by the ratio, if `denominator_filter` and `denominator_aggregations` + // are specified. + Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} } +func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 1} +} + +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Merge(m, src) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Size(m) +} +func (m *AlertPolicy_Condition_MetricThreshold) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricThreshold proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string { + if m != nil { + return m.DenominatorFilter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation { + if m != nil { + return m.DenominatorAggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType { + if m != nil { + return m.Comparison + } + return ComparisonType_COMPARISON_UNSPECIFIED +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 { + if m != nil { + return m.ThresholdValue + } + return 0 +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +// A condition type that checks that monitored resources +// are reporting data. The configuration defines a metric and +// a set of monitored resources. The predicate is considered in violation +// when a time series for the specified metric of a monitored +// resource does not include any data in the specified `duration`. 
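+ //
+ // A minimal sketch of such a condition, assuming a hypothetical custom
+ // metric type and direct construction in Go:
+ //
+ //     &AlertPolicy_Condition_MetricAbsence{
+ //         Filter:   `metric.type="custom.googleapis.com/my/metric"`,
+ //         Duration: &duration.Duration{Seconds: 300},
+ //     }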
+type AlertPolicy_Condition_MetricAbsence struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the
+ // one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field.
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // The amount of time that a time series must fail to report new
+ // data to be considered failing. Currently, only values that
+ // are a multiple of a minute--e.g., 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored.
+ Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} } +func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) } +func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {} +func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) { + return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 2} +} + +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Unmarshal(m, b) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Marshal(b, m, deterministic) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Merge(m, src) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_Size() int { + return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Size(m) +} +func (m *AlertPolicy_Condition_MetricAbsence) XXX_DiscardUnknown() { + xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertPolicy_Condition_MetricAbsence proto.InternalMessageInfo + +func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation { + if m != nil { + return m.Aggregations + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration { + if m != nil { + return m.Duration + } + return nil +} + +func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value) + proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.AlertPolicy.UserLabelsEntry") + proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation") + proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition") + proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger") + proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold") + proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence") +} + +func init() { proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor_014ef0e1a0f00a00) } + +var fileDescriptor_014ef0e1a0f00a00 = []byte{ + // 941 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44, + 0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90, + 0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69, + 0x91, 0x50, 0x25, 0xcb, 0x71, 0xa6, 0xae, 0x85, 0x3d, 
0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f, + 0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb, + 0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d, + 0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 0x3c, 0xd0, + 0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67, + 0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac, + 0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6, + 0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe, + 0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c, + 0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf, + 0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67, + 0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69, + 0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08, + 0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed, + 0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3, + 0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7, + 0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62, + 0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08, + 0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6, + 0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70, + 0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0, + 0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f, + 0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93, + 0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64, + 0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58, + 0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12, + 0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9, + 0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7, + 0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c, + 0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1, + 0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a, + 0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54, + 0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee, + 0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60, + 0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4, + 0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a, + 0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 0x6a, 0x16, 0xbe, 0x51, 
0x01, 0xfc, 0xdf, + 0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c, + 0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02, + 0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b, + 0x8f, 0xbe, 0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24, + 0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92, + 0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca, + 0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50, + 0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c, + 0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca, + 0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43, + 0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf, + 0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0, + 0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71, + 0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20, + 0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8, + 0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae, + 0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a, + 0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9, + 0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go new file mode 100644 index 000000000..eea9624c0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go @@ -0,0 +1,672 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/alert_service.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + context "golang.org/x/net/context" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The protocol for the `CreateAlertPolicy` request. +type CreateAlertPolicyRequest struct { + // The project in which to create the alerting policy. The format is + // `projects/[PROJECT_ID]`. + // + // Note that this field names the parent container in which the alerting + // policy will be written, not the name of the created policy. 
The alerting + // policy that is returned will have a name that contains a normalized + // representation of this name as a prefix but adds a suffix of the form + // `/alertPolicies/[POLICY_ID]`, identifying the policy in the container. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The requested alerting policy. You should omit the `name` field in this + // policy. The name will be returned in the new policy, including + // a new [ALERT_POLICY_ID] value. + AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} } +func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateAlertPolicyRequest) ProtoMessage() {} +func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{0} +} + +func (m *CreateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *CreateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *CreateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateAlertPolicyRequest.Merge(m, src) +} +func (m *CreateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_CreateAlertPolicyRequest.Size(m) +} +func (m *CreateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateAlertPolicyRequest proto.InternalMessageInfo + +func (m *CreateAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `GetAlertPolicy` request. +type GetAlertPolicyRequest struct { + // The alerting policy to retrieve. 
The format is + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} } +func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*GetAlertPolicyRequest) ProtoMessage() {} +func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{1} +} + +func (m *GetAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAlertPolicyRequest.Unmarshal(m, b) +} +func (m *GetAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *GetAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAlertPolicyRequest.Merge(m, src) +} +func (m *GetAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_GetAlertPolicyRequest.Size(m) +} +func (m *GetAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAlertPolicyRequest proto.InternalMessageInfo + +func (m *GetAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListAlertPolicies` request. +type ListAlertPoliciesRequest struct { + // The project whose alert policies are to be listed. The format is + // + // projects/[PROJECT_ID] + // + // Note that this field names the parent container in which the alerting + // policies to be listed are stored. To retrieve a single alerting policy + // by name, use the + // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // alert policies to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of field references as the `filter` field. Entries can be + // prefixed with a minus sign to sort by the field in descending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} } +func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesRequest) ProtoMessage() {} +func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{2} +} + +func (m *ListAlertPoliciesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesRequest.Unmarshal(m, b) +} +func (m *ListAlertPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesRequest.Marshal(b, m, deterministic) +} +func (m *ListAlertPoliciesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesRequest.Merge(m, src) +} +func (m *ListAlertPoliciesRequest) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesRequest.Size(m) +} +func (m *ListAlertPoliciesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesRequest proto.InternalMessageInfo + +func (m *ListAlertPoliciesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListAlertPoliciesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListAlertPoliciesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListAlertPolicies` response. +type ListAlertPoliciesResponse struct { + // The returned alert policies. + AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"` + // If there might be more results than were returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
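+ //
+ // A minimal pagination sketch, assuming an already-constructed
+ // AlertPolicyServiceClient `client`, a context `ctx`, and a
+ // hypothetical project ID:
+ //
+ //     req := &ListAlertPoliciesRequest{Name: "projects/my-project"}
+ //     for {
+ //         resp, err := client.ListAlertPolicies(ctx, req)
+ //         if err != nil {
+ //             break // handle the error as appropriate
+ //         }
+ //         for _, policy := range resp.AlertPolicies {
+ //             _ = policy // process each returned policy
+ //         }
+ //         if resp.NextPageToken == "" {
+ //             break
+ //         }
+ //         req.PageToken = resp.NextPageToken
+ //     }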
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} } +func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) } +func (*ListAlertPoliciesResponse) ProtoMessage() {} +func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{3} +} + +func (m *ListAlertPoliciesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListAlertPoliciesResponse.Unmarshal(m, b) +} +func (m *ListAlertPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListAlertPoliciesResponse.Marshal(b, m, deterministic) +} +func (m *ListAlertPoliciesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListAlertPoliciesResponse.Merge(m, src) +} +func (m *ListAlertPoliciesResponse) XXX_Size() int { + return xxx_messageInfo_ListAlertPoliciesResponse.Size(m) +} +func (m *ListAlertPoliciesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListAlertPoliciesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListAlertPoliciesResponse proto.InternalMessageInfo + +func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy { + if m != nil { + return m.AlertPolicies + } + return nil +} + +func (m *ListAlertPoliciesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The protocol for the `UpdateAlertPolicy` request. +type UpdateAlertPolicyRequest struct { + // Optional. A list of alerting policy field names. If this field is not + // empty, each listed field in the existing alerting policy is set to the + // value of the corresponding field in the supplied policy (`alert_policy`), + // or to the field's default value if the field is not in the supplied + // alerting policy. Fields not listed retain their previous value. + // + // Examples of valid field masks include `display_name`, `documentation`, + // `documentation.content`, `documentation.mime_type`, `user_labels`, + // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc. + // + // If this field is empty, then the supplied alerting policy replaces the + // existing policy. It is the same as deleting the existing policy and + // adding the supplied policy, except for the following: + // + // + The new policy will have the same `[ALERT_POLICY_ID]` as the former + // policy. This gives you continuity with the former policy in your + // notifications and incidents. + // + Conditions in the new policy will keep their former `[CONDITION_ID]` if + // the supplied condition includes the `name` field with that + // `[CONDITION_ID]`. If the supplied condition omits the `name` field, + // then a new `[CONDITION_ID]` is created. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. The updated alerting policy or the updated values for the + // fields listed in `update_mask`. + // If `update_mask` is not empty, any fields in this policy that are + // not in `update_mask` are ignored. 
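+ //
+ // A minimal sketch of a partial update that changes only the display
+ // name (`policyName` is a hypothetical existing policy resource name):
+ //
+ //     &UpdateAlertPolicyRequest{
+ //         UpdateMask:  &field_mask.FieldMask{Paths: []string{"display_name"}},
+ //         AlertPolicy: &AlertPolicy{Name: policyName, DisplayName: "New display name"},
+ //     }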
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} } +func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateAlertPolicyRequest) ProtoMessage() {} +func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{4} +} + +func (m *UpdateAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateAlertPolicyRequest.Unmarshal(m, b) +} +func (m *UpdateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *UpdateAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateAlertPolicyRequest.Merge(m, src) +} +func (m *UpdateAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_UpdateAlertPolicyRequest.Size(m) +} +func (m *UpdateAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateAlertPolicyRequest proto.InternalMessageInfo + +func (m *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy { + if m != nil { + return m.AlertPolicy + } + return nil +} + +// The protocol for the `DeleteAlertPolicy` request. +type DeleteAlertPolicyRequest struct { + // The alerting policy to delete. The format is: + // + // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID] + // + // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} } +func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteAlertPolicyRequest) ProtoMessage() {} +func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c45362b2a456d1bf, []int{5} +} + +func (m *DeleteAlertPolicyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteAlertPolicyRequest.Unmarshal(m, b) +} +func (m *DeleteAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteAlertPolicyRequest.Marshal(b, m, deterministic) +} +func (m *DeleteAlertPolicyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteAlertPolicyRequest.Merge(m, src) +} +func (m *DeleteAlertPolicyRequest) XXX_Size() int { + return xxx_messageInfo_DeleteAlertPolicyRequest.Size(m) +} +func (m *DeleteAlertPolicyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteAlertPolicyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteAlertPolicyRequest proto.InternalMessageInfo + +func (m *DeleteAlertPolicyRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest") + proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest") + proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest") + proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse") + proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest") + proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor_c45362b2a456d1bf) +} + +var fileDescriptor_c45362b2a456d1bf = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a, + 0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e, + 0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23, + 0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79, + 0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68, + 0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49, + 0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48, + 0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08, + 0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c, + 0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e, + 0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37, + 0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e, + 0x6e, 0xc9, 0xdf, 0xf8, 
0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57, + 0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21, + 0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c, + 0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40, + 0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f, + 0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09, + 0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07, + 0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c, + 0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0, + 0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2, + 0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32, + 0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0, + 0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e, + 0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe, + 0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d, + 0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33, + 0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a, + 0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa, + 0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2, + 0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d, + 0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a, + 0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e, + 0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96, + 0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6, + 0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4, + 0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3, + 0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15, + 0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AlertPolicyServiceClient is the client API for AlertPolicyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type AlertPolicyServiceClient interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. 
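+ //
+ // A minimal usage sketch (the connection setup, project ID, and policy
+ // ID are hypothetical):
+ //
+ //     conn, _ := grpc.Dial(address, grpc.WithInsecure())
+ //     client := NewAlertPolicyServiceClient(conn)
+ //     policy, err := client.GetAlertPolicy(ctx, &GetAlertPolicyRequest{
+ //         Name: "projects/my-project/alertPolicies/12345",
+ //     })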
+ GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Updates an alerting policy. You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) +} + +type alertPolicyServiceClient struct { + cc *grpc.ClientConn +} + +func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient { + return &alertPolicyServiceClient{cc} +} + +func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) { + out := new(ListAlertPoliciesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) { + out := new(AlertPolicy) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AlertPolicyServiceServer is the server API for AlertPolicyService service. +type AlertPolicyServiceServer interface { + // Lists the existing alerting policies for the project. + ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) + // Gets a single alerting policy. + GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) + // Creates a new alerting policy. + CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) + // Deletes an alerting policy. + DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error) + // Updates an alerting policy. 
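Aside, not part of the vendored file: the doc comments above describe the five unary RPCs the generated client exposes. Below is a minimal sketch of driving this raw client directly. In practice the cloud.google.com/go/monitoring/apiv3 wrapper from this same change would be used instead; the project ID is a placeholder, and per-RPC OAuth credentials are omitted, so this dial alone is not enough to authenticate real requests.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// The endpoint requires TLS; real calls also need OAuth credentials,
	// omitted here for brevity.
	conn, err := grpc.Dial("monitoring.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := monitoringpb.NewAlertPolicyServiceClient(conn)
	resp, err := client.ListAlertPolicies(context.Background(),
		&monitoringpb.ListAlertPoliciesRequest{Name: "projects/my-project"}) // hypothetical project
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.GetAlertPolicies() {
		fmt.Println(p.GetDisplayName())
	}
}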
You can either replace the entire policy with + // a new one or replace only certain fields in the current alerting policy by + // specifying the fields to be updated via `updateMask`. Returns the + // updated alerting policy. + UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) +} + +func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) { + s.RegisterService(&_AlertPolicyService_serviceDesc, srv) +} + +func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListAlertPoliciesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(UpdateAlertPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.AlertPolicyService", + HandlerType: (*AlertPolicyServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListAlertPolicies", + Handler: _AlertPolicyService_ListAlertPolicies_Handler, + }, + { + MethodName: "GetAlertPolicy", + Handler: _AlertPolicyService_GetAlertPolicy_Handler, + }, + { + MethodName: "CreateAlertPolicy", + Handler: _AlertPolicyService_CreateAlertPolicy_Handler, + }, + { + MethodName: "DeleteAlertPolicy", + Handler: _AlertPolicyService_DeleteAlertPolicy_Handler, + }, + { + MethodName: "UpdateAlertPolicy", + Handler: _AlertPolicyService_UpdateAlertPolicy_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/alert_service.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go new file mode 100644 index 000000000..4e48c0e09 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -0,0 +1,898 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/common.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + _ "google.golang.org/genproto/googleapis/api/annotations" + distribution "google.golang.org/genproto/googleapis/api/distribution" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Specifies an ordering relationship on two arguments, here called left and +// right. +type ComparisonType int32 + +const ( + // No ordering relationship is specified. + ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0 + // The left argument is greater than the right argument. + ComparisonType_COMPARISON_GT ComparisonType = 1 + // The left argument is greater than or equal to the right argument. + ComparisonType_COMPARISON_GE ComparisonType = 2 + // The left argument is less than the right argument. + ComparisonType_COMPARISON_LT ComparisonType = 3 + // The left argument is less than or equal to the right argument. + ComparisonType_COMPARISON_LE ComparisonType = 4 + // The left argument is equal to the right argument. + ComparisonType_COMPARISON_EQ ComparisonType = 5 + // The left argument is not equal to the right argument. 
+ ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+var ComparisonType_name = map[int32]string{
+ 0: "COMPARISON_UNSPECIFIED",
+ 1: "COMPARISON_GT",
+ 2: "COMPARISON_GE",
+ 3: "COMPARISON_LT",
+ 4: "COMPARISON_LE",
+ 5: "COMPARISON_EQ",
+ 6: "COMPARISON_NE",
+}
+
+var ComparisonType_value = map[string]int32{
+ "COMPARISON_UNSPECIFIED": 0,
+ "COMPARISON_GT": 1,
+ "COMPARISON_GE": 2,
+ "COMPARISON_LT": 3,
+ "COMPARISON_LE": 4,
+ "COMPARISON_EQ": 5,
+ "COMPARISON_NE": 6,
+}
+
+func (x ComparisonType) String() string {
+ return proto.EnumName(ComparisonType_name, int32(x))
+}
+
+func (ComparisonType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{0}
+}
+
+// The tier of service for a Stackdriver account. Please see the
+// [service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers)
+// for more details.
+type ServiceTier int32 // Deprecated: Do not use.
+const (
+ // An invalid sentinel value, used to indicate that a tier has not
+ // been provided explicitly.
+ ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+ // The Stackdriver Basic tier, a free tier of service that provides basic
+ // features, a moderate allotment of logs, and access to built-in metrics.
+ // A number of features are not available in this tier. For more details,
+ // see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+ // The Stackdriver Premium tier, a higher, more expensive tier of service
+ // that provides access to all Stackdriver features, lets you use Stackdriver
+ // with AWS accounts, and has larger allotments for logs and metrics. For
+ // more details, see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+var ServiceTier_name = map[int32]string{
+ 0: "SERVICE_TIER_UNSPECIFIED",
+ 1: "SERVICE_TIER_BASIC",
+ 2: "SERVICE_TIER_PREMIUM",
+}
+
+var ServiceTier_value = map[string]int32{
+ "SERVICE_TIER_UNSPECIFIED": 0,
+ "SERVICE_TIER_BASIC": 1,
+ "SERVICE_TIER_PREMIUM": 2,
+}
+
+func (x ServiceTier) String() string {
+ return proto.EnumName(ServiceTier_name, int32(x))
+}
+
+func (ServiceTier) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{1}
+}
+
+// The Aligner describes how to bring the data points in a single
+// time series into temporal alignment.
+type Aggregation_Aligner int32
+
+const (
+ // No alignment. Raw data is returned. Not valid if cross-time
+ // series reduction is requested. The value type of the result is
+ // the same as the value type of the input.
+ Aggregation_ALIGN_NONE Aggregation_Aligner = 0
+ // Align and convert to delta metric type. This alignment is valid
+ // for cumulative metrics and delta metrics. Aligning an existing
+ // delta metric to a delta metric requires that the alignment
+ // period be increased. The value type of the result is the same
+ // as the value type of the input.
+ //
+ // One can think of this aligner as a rate but without time units; that
+ // is, the output is conceptually (second_point - first_point).
+ Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
+ // Align and convert to a rate. This alignment is valid for
+ // cumulative metrics and delta metrics with numeric values. The output is a
+ // gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ // + // One can think of this aligner as conceptually providing the slope of + // the line that passes through the value at the start and end of the + // window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)), + // and the output unit is one that has a "/time" dimension. + // + // If, by rate, you are looking for percentage change, see the + // `ALIGN_PERCENT_CHANGE` aligner option. + Aggregation_ALIGN_RATE Aggregation_Aligner = 2 + // Align by interpolating between adjacent points around the + // period boundary. This alignment is valid for gauge + // metrics with numeric values. The value type of the result is the same + // as the value type of the input. + Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3 + // Align by shifting the oldest data point before the period + // boundary to the boundary. This alignment is valid for gauge + // metrics. The value type of the result is the same as the + // value type of the input. + Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4 + // Align time series via aggregation. The resulting data point in + // the alignment period is the minimum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MIN Aggregation_Aligner = 10 + // Align time series via aggregation. The resulting data point in + // the alignment period is the maximum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // values. The value type of the result is the same as the value + // type of the input. + Aggregation_ALIGN_MAX Aggregation_Aligner = 11 + // Align time series via aggregation. The resulting data point in + // the alignment period is the average or arithmetic mean of all + // data points in the period. This alignment is valid for gauge and delta + // metrics with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_MEAN Aggregation_Aligner = 12 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // or Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT Aggregation_Aligner = 13 + // Align time series via aggregation. The resulting data point in + // the alignment period is the sum of all data points in the + // period. This alignment is valid for gauge and delta metrics with numeric + // and distribution values. The value type of the output is the + // same as the value type of the input. + Aggregation_ALIGN_SUM Aggregation_Aligner = 14 + // Align time series via aggregation. The resulting data point in + // the alignment period is the standard deviation of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with numeric values. The value type of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of True-valued data points in the + // period. This alignment is valid for gauge metrics with + // Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. 
+ Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16 + // Align time series via aggregation. The resulting data point in + // the alignment period is the count of False-valued data points in the + // period. This alignment is valid for gauge metrics with + // Boolean values. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24 + // Align time series via aggregation. The resulting data point in + // the alignment period is the fraction of True-valued data points in the + // period. This alignment is valid for gauge metrics with Boolean values. + // The output value is in the range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 99th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 95th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 50th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20 + // Align time series via aggregation. The resulting data point in + // the alignment period is the 5th percentile of all data + // points in the period. This alignment is valid for gauge and delta metrics + // with distribution values. The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21 + // Align and convert to a percentage change. This alignment is valid for + // gauge and delta metrics with numeric values. This alignment conceptually + // computes the equivalent of "((current - previous)/previous)*100" + // where previous value is determined based on the alignmentPeriod. + // In the event that previous is 0 the calculated value is infinity with the + // exception that if both (current - previous) and previous are 0 the + // calculated value is 0. + // A 10 minute moving mean is computed at each point of the time window + // prior to the above calculation to smooth the metric and prevent false + // positives from very short lived spikes. + // Only applicable for data that is >= 0. Any values < 0 are treated as + // no data. While delta metrics are accepted by this alignment special care + // should be taken that the values for the metric will always be positive. + // The output is a gauge metric with value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. 
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23 +) + +var Aggregation_Aligner_name = map[int32]string{ + 0: "ALIGN_NONE", + 1: "ALIGN_DELTA", + 2: "ALIGN_RATE", + 3: "ALIGN_INTERPOLATE", + 4: "ALIGN_NEXT_OLDER", + 10: "ALIGN_MIN", + 11: "ALIGN_MAX", + 12: "ALIGN_MEAN", + 13: "ALIGN_COUNT", + 14: "ALIGN_SUM", + 15: "ALIGN_STDDEV", + 16: "ALIGN_COUNT_TRUE", + 24: "ALIGN_COUNT_FALSE", + 17: "ALIGN_FRACTION_TRUE", + 18: "ALIGN_PERCENTILE_99", + 19: "ALIGN_PERCENTILE_95", + 20: "ALIGN_PERCENTILE_50", + 21: "ALIGN_PERCENTILE_05", + 23: "ALIGN_PERCENT_CHANGE", +} + +var Aggregation_Aligner_value = map[string]int32{ + "ALIGN_NONE": 0, + "ALIGN_DELTA": 1, + "ALIGN_RATE": 2, + "ALIGN_INTERPOLATE": 3, + "ALIGN_NEXT_OLDER": 4, + "ALIGN_MIN": 10, + "ALIGN_MAX": 11, + "ALIGN_MEAN": 12, + "ALIGN_COUNT": 13, + "ALIGN_SUM": 14, + "ALIGN_STDDEV": 15, + "ALIGN_COUNT_TRUE": 16, + "ALIGN_COUNT_FALSE": 24, + "ALIGN_FRACTION_TRUE": 17, + "ALIGN_PERCENTILE_99": 18, + "ALIGN_PERCENTILE_95": 19, + "ALIGN_PERCENTILE_50": 20, + "ALIGN_PERCENTILE_05": 21, + "ALIGN_PERCENT_CHANGE": 23, +} + +func (x Aggregation_Aligner) String() string { + return proto.EnumName(Aggregation_Aligner_name, int32(x)) +} + +func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2, 0} +} + +// A Reducer describes how to aggregate data points from multiple +// time series into a single time series. +type Aggregation_Reducer int32 + +const ( + // No cross-time series reduction. The output of the aligner is + // returned. + Aggregation_REDUCE_NONE Aggregation_Reducer = 0 + // Reduce by computing the mean across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric or distribution values. The value type of the + // output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_MEAN Aggregation_Reducer = 1 + // Reduce by computing the minimum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MIN Aggregation_Reducer = 2 + // Reduce by computing the maximum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric values. The value type of the output + // is the same as the value type of the input. + Aggregation_REDUCE_MAX Aggregation_Reducer = 3 + // Reduce by computing the sum across time series for each + // alignment period. This reducer is valid for delta and + // gauge metrics with numeric and distribution values. The value type of + // the output is the same as the value type of the input. + Aggregation_REDUCE_SUM Aggregation_Reducer = 4 + // Reduce by computing the standard deviation across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics with numeric or distribution values. The value type of + // the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5 + // Reduce by computing the count of data points across time series + // for each alignment period. This reducer is valid for delta + // and gauge metrics of numeric, Boolean, distribution, and string value + // type. The value type of the output is + // [INT64][google.api.MetricDescriptor.ValueType.INT64]. 
+ Aggregation_REDUCE_COUNT Aggregation_Reducer = 6 + // Reduce by computing the count of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7 + // Reduce by computing the count of False-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The value type of + // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64]. + Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15 + // Reduce by computing the fraction of True-valued data points across time + // series for each alignment period. This reducer is valid for delta + // and gauge metrics of Boolean value type. The output value is in the + // range [0, 1] and has value type + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]. + Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8 + // Reduce by computing 99th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9 + // Reduce by computing 95th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10 + // Reduce by computing 50th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11 + // Reduce by computing 5th percentile of data points across time series + // for each alignment period. This reducer is valid for gauge and delta + // metrics of numeric and distribution type. The value of the output is + // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE] + Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12 +) + +var Aggregation_Reducer_name = map[int32]string{ + 0: "REDUCE_NONE", + 1: "REDUCE_MEAN", + 2: "REDUCE_MIN", + 3: "REDUCE_MAX", + 4: "REDUCE_SUM", + 5: "REDUCE_STDDEV", + 6: "REDUCE_COUNT", + 7: "REDUCE_COUNT_TRUE", + 15: "REDUCE_COUNT_FALSE", + 8: "REDUCE_FRACTION_TRUE", + 9: "REDUCE_PERCENTILE_99", + 10: "REDUCE_PERCENTILE_95", + 11: "REDUCE_PERCENTILE_50", + 12: "REDUCE_PERCENTILE_05", +} + +var Aggregation_Reducer_value = map[string]int32{ + "REDUCE_NONE": 0, + "REDUCE_MEAN": 1, + "REDUCE_MIN": 2, + "REDUCE_MAX": 3, + "REDUCE_SUM": 4, + "REDUCE_STDDEV": 5, + "REDUCE_COUNT": 6, + "REDUCE_COUNT_TRUE": 7, + "REDUCE_COUNT_FALSE": 15, + "REDUCE_FRACTION_TRUE": 8, + "REDUCE_PERCENTILE_99": 9, + "REDUCE_PERCENTILE_95": 10, + "REDUCE_PERCENTILE_50": 11, + "REDUCE_PERCENTILE_05": 12, +} + +func (x Aggregation_Reducer) String() string { + return proto.EnumName(Aggregation_Reducer_name, int32(x)) +} + +func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2, 1} +} + +// A single strongly-typed value. 
+type TypedValue struct { + // The typed value field. + // + // Types that are valid to be assigned to Value: + // *TypedValue_BoolValue + // *TypedValue_Int64Value + // *TypedValue_DoubleValue + // *TypedValue_StringValue + // *TypedValue_DistributionValue + Value isTypedValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypedValue) Reset() { *m = TypedValue{} } +func (m *TypedValue) String() string { return proto.CompactTextString(m) } +func (*TypedValue) ProtoMessage() {} +func (*TypedValue) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{0} +} + +func (m *TypedValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypedValue.Unmarshal(m, b) +} +func (m *TypedValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypedValue.Marshal(b, m, deterministic) +} +func (m *TypedValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedValue.Merge(m, src) +} +func (m *TypedValue) XXX_Size() int { + return xxx_messageInfo_TypedValue.Size(m) +} +func (m *TypedValue) XXX_DiscardUnknown() { + xxx_messageInfo_TypedValue.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedValue proto.InternalMessageInfo + +type isTypedValue_Value interface { + isTypedValue_Value() +} + +type TypedValue_BoolValue struct { + BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type TypedValue_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type TypedValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type TypedValue_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type TypedValue_DistributionValue struct { + DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +func (*TypedValue_BoolValue) isTypedValue_Value() {} + +func (*TypedValue_Int64Value) isTypedValue_Value() {} + +func (*TypedValue_DoubleValue) isTypedValue_Value() {} + +func (*TypedValue_StringValue) isTypedValue_Value() {} + +func (*TypedValue_DistributionValue) isTypedValue_Value() {} + +func (m *TypedValue) GetValue() isTypedValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *TypedValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*TypedValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *TypedValue) GetInt64Value() int64 { + if x, ok := m.GetValue().(*TypedValue_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *TypedValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*TypedValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *TypedValue) GetStringValue() string { + if x, ok := m.GetValue().(*TypedValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *TypedValue) GetDistributionValue() *distribution.Distribution { + if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
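Aside, not part of the generated file: the wrapper structs above are how protoc-gen-go represents the proto `value` oneof. A short sketch of setting one arm and reading it back, assuming the monitoringpb import alias from the earlier sketch:

v := &monitoringpb.TypedValue{
	Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 0.95},
}
fmt.Println(v.GetDoubleValue()) // 0.95
// Getters for arms the oneof does not currently hold return zero values:
fmt.Println(v.GetStringValue()) // ""
fmt.Println(v.GetBoolValue())   // false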
+func (*TypedValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TypedValue_OneofMarshaler, _TypedValue_OneofUnmarshaler, _TypedValue_OneofSizer, []interface{}{ + (*TypedValue_BoolValue)(nil), + (*TypedValue_Int64Value)(nil), + (*TypedValue_DoubleValue)(nil), + (*TypedValue_StringValue)(nil), + (*TypedValue_DistributionValue)(nil), + } +} + +func _TypedValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TypedValue_Int64Value: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *TypedValue_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *TypedValue_DistributionValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DistributionValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TypedValue.Value has unexpected type %T", x) + } + return nil +} + +func _TypedValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TypedValue) + switch tag { + case 1: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_BoolValue{x != 0} + return true, err + case 2: // value.int64_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &TypedValue_Int64Value{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &TypedValue_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &TypedValue_StringValue{x} + return true, err + case 5: // value.distribution_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(distribution.Distribution) + err := b.DecodeMessage(msg) + m.Value = &TypedValue_DistributionValue{msg} + return true, err + default: + return false, nil + } +} + +func _TypedValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TypedValue) + // value + switch x := m.Value.(type) { + case *TypedValue_BoolValue: + n += 1 // tag and wire + n += 1 + case *TypedValue_Int64Value: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Int64Value)) + case *TypedValue_DoubleValue: + n += 1 // tag and wire + n += 8 + case *TypedValue_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *TypedValue_DistributionValue: + s := proto.Size(x.DistributionValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A time interval extending just after a start time through an end time. 
+// If the start time is the same as the end time, then the interval +// represents a single point in time. +type TimeInterval struct { + // Required. The end of the time interval. + EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // Optional. The beginning of the time interval. The default value + // for the start time is the end time. The start time must not be + // later than the end time. + StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeInterval) Reset() { *m = TimeInterval{} } +func (m *TimeInterval) String() string { return proto.CompactTextString(m) } +func (*TimeInterval) ProtoMessage() {} +func (*TimeInterval) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{1} +} + +func (m *TimeInterval) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeInterval.Unmarshal(m, b) +} +func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic) +} +func (m *TimeInterval) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeInterval.Merge(m, src) +} +func (m *TimeInterval) XXX_Size() int { + return xxx_messageInfo_TimeInterval.Size(m) +} +func (m *TimeInterval) XXX_DiscardUnknown() { + xxx_messageInfo_TimeInterval.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeInterval proto.InternalMessageInfo + +func (m *TimeInterval) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *TimeInterval) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +// Describes how to combine multiple time series to provide different views of +// the data. Aggregation consists of an alignment step on individual time +// series (`alignment_period` and `per_series_aligner`) followed by an optional +// reduction step of the data across the aligned time series +// (`cross_series_reducer` and `group_by_fields`). For more details, see +// [Aggregation](/monitoring/api/learn_more#aggregation). +type Aggregation struct { + // The alignment period for per-[time series][google.monitoring.v3.TimeSeries] + // alignment. If present, `alignmentPeriod` must be at least 60 + // seconds. After per-time series alignment, each time series will + // contain data points only on the period boundaries. If + // `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then + // this field is ignored. If `perSeriesAligner` is specified and + // does not equal `ALIGN_NONE`, then this field must be defined; + // otherwise an error is returned. + AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"` + // The approach to be used to align individual time series. Not all + // alignment functions may be applied to all time series, depending + // on the metric type and value type of the original time + // series. Alignment may change the metric type or the value type of + // the time series. + // + // Time series data must be aligned in order to perform cross-time + // series reduction. 
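Aside, not part of the generated file: a sketch of building a TimeInterval with the github.com/golang/protobuf/ptypes helpers this vendoring already includes; the five-minute window is an arbitrary example, and the monitoringpb alias is assumed as before.

func lastFiveMinutes() (*monitoringpb.TimeInterval, error) {
	end := time.Now()
	endTS, err := ptypes.TimestampProto(end)
	if err != nil {
		return nil, err
	}
	startTS, err := ptypes.TimestampProto(end.Add(-5 * time.Minute))
	if err != nil {
		return nil, err
	}
	// Per the field docs above, StartTime must not be later than EndTime.
	return &monitoringpb.TimeInterval{StartTime: startTS, EndTime: endTS}, nil
}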
If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"`
+ // The approach to be used to combine time series. Not all reducer
+ // functions may be applied to all time series, depending on the
+ // metric type and the value type of the original time
+ // series. Reduction may change the metric type or value type of the
+ // time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"`
+ // The set of fields to preserve when `crossSeriesReducer` is
+ // specified. The `groupByFields` determine how the time series are
+ // partitioned into subsets prior to applying the aggregation
+ // function. Each subset contains time series that have the same
+ // value for each of the grouping fields. Each individual time
+ // series is a member of exactly one subset. The
+ // `crossSeriesReducer` is applied to each subset of time series.
+ // It is not possible to reduce across different resource types, so
+ // this field implicitly contains `resource.type`. Fields not
+ // specified in `groupByFields` are aggregated away. If
+ // `groupByFields` is not specified and all the time series have
+ // the same resource type, then the time series are aggregated into
+ // a single output time series. If `crossSeriesReducer` is not
+ // defined, this field is ignored.
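Aside, not part of the generated file: the constraints in the field docs above couple these fields together, so a reducer needs both an aligner and an alignment period of at least 60 seconds. A sketch of one valid combination, assuming the imports from the earlier sketches; the zone grouping field is a hypothetical example, not a required value.

agg := &monitoringpb.Aggregation{
	// 60s windows, per-series rate, then a sum across series grouped by zone.
	AlignmentPeriod:    ptypes.DurationProto(60 * time.Second),
	PerSeriesAligner:   monitoringpb.Aggregation_ALIGN_RATE,
	CrossSeriesReducer: monitoringpb.Aggregation_REDUCE_SUM,
	GroupByFields:      []string{"resource.label.zone"}, // hypothetical grouping field
}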
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Aggregation) Reset() { *m = Aggregation{} } +func (m *Aggregation) String() string { return proto.CompactTextString(m) } +func (*Aggregation) ProtoMessage() {} +func (*Aggregation) Descriptor() ([]byte, []int) { + return fileDescriptor_013c57c1dcbb8d65, []int{2} +} + +func (m *Aggregation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Aggregation.Unmarshal(m, b) +} +func (m *Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Aggregation.Marshal(b, m, deterministic) +} +func (m *Aggregation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Aggregation.Merge(m, src) +} +func (m *Aggregation) XXX_Size() int { + return xxx_messageInfo_Aggregation.Size(m) +} +func (m *Aggregation) XXX_DiscardUnknown() { + xxx_messageInfo_Aggregation.DiscardUnknown(m) +} + +var xxx_messageInfo_Aggregation proto.InternalMessageInfo + +func (m *Aggregation) GetAlignmentPeriod() *duration.Duration { + if m != nil { + return m.AlignmentPeriod + } + return nil +} + +func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner { + if m != nil { + return m.PerSeriesAligner + } + return Aggregation_ALIGN_NONE +} + +func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer { + if m != nil { + return m.CrossSeriesReducer + } + return Aggregation_REDUCE_NONE +} + +func (m *Aggregation) GetGroupByFields() []string { + if m != nil { + return m.GroupByFields + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value) + proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value) + proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value) + proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue") + proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval") + proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation") +} + +func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor_013c57c1dcbb8d65) } + +var fileDescriptor_013c57c1dcbb8d65 = []byte{ + // 957 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44, + 0x18, 0xc7, 0xe3, 0x64, 0xdb, 0x34, 0x9f, 0xdb, 0x66, 0x3a, 0xdb, 0xed, 0x86, 0x68, 0x61, 0xb3, + 0x45, 0x42, 0x61, 0x0f, 0x4e, 0xd5, 0x12, 0xa4, 0x0a, 0x09, 0xc9, 0x75, 0xa6, 0xad, 0xa5, 0xc4, + 0x09, 0x13, 0xa7, 0x54, 0x50, 0xc9, 0x72, 0x9a, 0x59, 0xcb, 0x52, 0xe2, 0xb1, 0x6c, 0xa7, 0x52, + 0x6f, 0xdc, 0x79, 0x07, 0x2e, 0xdc, 0xb8, 0xf1, 0x1a, 0x3c, 0x0c, 0x17, 0x5e, 0x00, 0x79, 0xc6, + 0x59, 0x3b, 0x21, 0x08, 0x8e, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0x77, + 0x1e, 0xe7, 0xde, 0x9c, 0x75, 0x16, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x79, 0xba, 0xe8, + 0x3c, 0xf2, 0xc5, 0x82, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x63, 0x69, 0xd1, 0x72, 0x8b, 0xf6, + 0x74, 0xd1, 0x7c, 0x93, 0x35, 0xba, 0xa1, 0xdf, 0x71, 0x83, 0x80, 0x27, 0x6e, 0xe2, 0xf3, 0x20, + 0x96, 0x3d, 0xcd, 0x4f, 0x0b, 0xea, 0xcc, 0x8f, 0x93, 0xc8, 0x9f, 0x2e, 0x53, 0x3d, 
0x93, 0x3f, + 0xcb, 0x64, 0x51, 0x4d, 0x97, 0x1f, 0x3a, 0xb3, 0x65, 0xe4, 0x16, 0xf4, 0xb7, 0x9b, 0x7a, 0xe2, + 0x2f, 0x58, 0x9c, 0xb8, 0x8b, 0x50, 0x1a, 0x4e, 0xff, 0x54, 0x00, 0xec, 0xe7, 0x90, 0xcd, 0xee, + 0xdc, 0xf9, 0x92, 0xe1, 0xb7, 0x00, 0x53, 0xce, 0xe7, 0xce, 0x53, 0x5a, 0x35, 0x94, 0x96, 0xd2, + 0xde, 0xbb, 0x2d, 0xd1, 0x5a, 0xca, 0xa4, 0xe1, 0x1d, 0xa8, 0x7e, 0x90, 0x7c, 0xfd, 0x55, 0xe6, + 0x28, 0xb7, 0x94, 0x76, 0xe5, 0xb6, 0x44, 0x41, 0x40, 0x69, 0xf9, 0x1c, 0xf6, 0x67, 0x7c, 0x39, + 0x9d, 0xb3, 0xcc, 0x53, 0x69, 0x29, 0x6d, 0xe5, 0xb6, 0x44, 0x55, 0x49, 0x3f, 0x9a, 0xd2, 0xc3, + 0x04, 0x5e, 0x66, 0x7a, 0xd1, 0x52, 0xda, 0xb5, 0xd4, 0x24, 0xa9, 0x34, 0x99, 0x80, 0x8b, 0x67, + 0xce, 0xac, 0x3b, 0x2d, 0xa5, 0xad, 0x9e, 0x37, 0xb4, 0x2c, 0x4d, 0x37, 0xf4, 0xb5, 0x5e, 0xc1, + 0x75, 0x5b, 0xa2, 0x47, 0xc5, 0x2e, 0x31, 0xea, 0xaa, 0x0a, 0x3b, 0xa2, 0xfb, 0xf4, 0x27, 0x05, + 0xf6, 0x6d, 0x7f, 0xc1, 0xcc, 0x20, 0x61, 0xd1, 0x93, 0x3b, 0xc7, 0x5d, 0xd8, 0x63, 0xc1, 0xcc, + 0x49, 0x83, 0x11, 0xc7, 0x51, 0xcf, 0x9b, 0xab, 0xd1, 0xab, 0xd4, 0x34, 0x7b, 0x95, 0x1a, 0xad, + 0xb2, 0x60, 0x96, 0x56, 0xf8, 0x12, 0x20, 0x4e, 0xdc, 0x28, 0x91, 0x8d, 0xca, 0x7f, 0x36, 0xd6, + 0x84, 0x3b, 0xad, 0x4f, 0xff, 0xaa, 0x82, 0xaa, 0x7b, 0x5e, 0xc4, 0x3c, 0x71, 0x55, 0xb8, 0x07, + 0xc8, 0x9d, 0xfb, 0x5e, 0xb0, 0x60, 0x41, 0xe2, 0x84, 0x2c, 0xf2, 0xf9, 0x2c, 0x1b, 0xf8, 0xc9, + 0x3f, 0x06, 0xf6, 0xb2, 0xfb, 0xa5, 0xf5, 0x8f, 0x2d, 0x23, 0xd1, 0x81, 0xbf, 0x07, 0x1c, 0xb2, + 0xc8, 0x89, 0x59, 0xe4, 0xb3, 0xd8, 0x11, 0x2a, 0x8b, 0xc4, 0x89, 0x0e, 0xcf, 0xbf, 0xd4, 0xb6, + 0x7d, 0x7a, 0x5a, 0x61, 0x13, 0x9a, 0x2e, 0x1b, 0x28, 0x0a, 0x59, 0x34, 0x16, 0x33, 0x32, 0x82, + 0x7f, 0x84, 0xe3, 0xc7, 0x88, 0xc7, 0xf1, 0x6a, 0x74, 0xc4, 0x66, 0xcb, 0x47, 0x16, 0x89, 0x2b, + 0xfb, 0x5f, 0xa3, 0xa9, 0x6c, 0xa0, 0x58, 0x8c, 0x91, 0xc3, 0x33, 0x86, 0xbf, 0x80, 0xba, 0x17, + 0xf1, 0x65, 0xe8, 0x4c, 0x9f, 0x9d, 0x0f, 0x3e, 0x9b, 0xcf, 0xe2, 0xc6, 0x4e, 0xab, 0xd2, 0xae, + 0xd1, 0x03, 0x81, 0xaf, 0x9e, 0xaf, 0x05, 0x3c, 0xfd, 0xb9, 0x02, 0xd5, 0xd5, 0x86, 0x0e, 0x01, + 0xf4, 0xbe, 0x79, 0x63, 0x39, 0xd6, 0xd0, 0x22, 0xa8, 0x84, 0xeb, 0xa0, 0xca, 0xba, 0x47, 0xfa, + 0xb6, 0x8e, 0x94, 0xdc, 0x40, 0x75, 0x9b, 0xa0, 0x32, 0x7e, 0x05, 0x47, 0xb2, 0x36, 0x2d, 0x9b, + 0xd0, 0xd1, 0xb0, 0x9f, 0xe2, 0x0a, 0x3e, 0x06, 0x94, 0xcd, 0x21, 0xf7, 0xb6, 0x33, 0xec, 0xf7, + 0x08, 0x45, 0x2f, 0xf0, 0x01, 0xd4, 0x24, 0x1d, 0x98, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35, + 0x1f, 0x3d, 0x20, 0xba, 0x85, 0xf6, 0xf3, 0xb5, 0x8d, 0xe1, 0xc4, 0xb2, 0xd1, 0x41, 0xee, 0x1f, + 0x4f, 0x06, 0xe8, 0x10, 0x23, 0xd8, 0xcf, 0x4a, 0xbb, 0xd7, 0x23, 0x77, 0xa8, 0x9e, 0xaf, 0x2a, + 0x3a, 0x1c, 0x9b, 0x4e, 0x08, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0xfb, 0x63, 0x82, 0x1a, 0xf8, + 0x35, 0xbc, 0x94, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2d, 0xe9, 0x3f, 0xca, 0x85, 0x11, 0xa1, + 0x06, 0xb1, 0x6c, 0xb3, 0x4f, 0x9c, 0xcb, 0x4b, 0x84, 0xb7, 0x0b, 0x5d, 0xf4, 0x72, 0xab, 0xd0, + 0x3d, 0x43, 0xc7, 0x5b, 0x85, 0xb3, 0x2e, 0x7a, 0x85, 0x1b, 0x70, 0xbc, 0x26, 0x38, 0xc6, 0xad, + 0x6e, 0xdd, 0x10, 0xf4, 0xfa, 0xf4, 0xf7, 0x32, 0x54, 0x57, 0x37, 0x58, 0x07, 0x95, 0x92, 0xde, + 0xc4, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x0a, 0x98, 0x16, 0x2a, 0x17, 0x6b, + 0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x0b, 0x7c, 0x04, 0x07, 0xab, 0x5a, 0x86, 0xb6, 0x93, + 0xc6, 0x98, 0x21, 0x99, 0xf3, 0x6e, 0x1a, 0x58, 0x91, 0xc8, 0x5c, 0xaa, 0xf8, 0x04, 0xf0, 0x1a, + 0x96, 0x41, 0xd6, 0xd3, 0xb3, 0x64, 0x7c, 0x3d, 0xc9, 0xbd, 0x82, 0xb2, 0x1e, 0x65, 0xed, 0x5f, + 0x94, 0x2e, 
0x82, 0xed, 0x4a, 0xf7, 0x0c, 0xa9, 0xdb, 0x95, 0xb3, 0x2e, 0xda, 0x7f, 0xff, 0x8b, + 0x02, 0x87, 0x06, 0x5f, 0x84, 0x6e, 0xe4, 0xc7, 0x3c, 0x48, 0xdf, 0x5c, 0xdc, 0x84, 0x13, 0x63, + 0x38, 0x18, 0xe9, 0xd4, 0x1c, 0x0f, 0x2d, 0x67, 0x62, 0x8d, 0x47, 0xc4, 0x30, 0xaf, 0x4d, 0xd2, + 0x43, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x13, 0xa5, 0x5f, 0xf6, 0x3a, 0xea, 0xdb, + 0xa8, 0xb2, 0x89, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x67, 0x03, 0x59, 0x04, 0xed, 0xbe, + 0x77, 0x41, 0x1d, 0xb3, 0xe8, 0xc9, 0x7f, 0x64, 0xb6, 0xcf, 0x22, 0xfc, 0x06, 0x1a, 0x63, 0x42, + 0xef, 0x4c, 0x83, 0x38, 0xb6, 0x49, 0xe8, 0xc6, 0xf6, 0x4e, 0x00, 0xaf, 0xa9, 0x57, 0xfa, 0xd8, + 0x34, 0x90, 0x92, 0x9e, 0x7f, 0x8d, 0x8f, 0x28, 0x19, 0x98, 0x93, 0x01, 0x2a, 0x37, 0xcb, 0x0d, + 0xe5, 0xea, 0x57, 0x05, 0x1a, 0x8f, 0x7c, 0xb1, 0xf5, 0xc9, 0xb8, 0x52, 0x0d, 0xf1, 0xb3, 0x1c, + 0xa5, 0x4f, 0xdd, 0x48, 0xf9, 0xe1, 0xdb, 0xcc, 0xe4, 0xf1, 0xb9, 0x1b, 0x78, 0x1a, 0x8f, 0xbc, + 0x8e, 0xc7, 0x02, 0xf1, 0x10, 0x76, 0xa4, 0xe4, 0x86, 0x7e, 0xbc, 0xfe, 0xbf, 0xfd, 0x26, 0xaf, + 0x7e, 0x2b, 0x37, 0x6f, 0xe4, 0x00, 0x63, 0xce, 0x97, 0x33, 0x6d, 0x90, 0xaf, 0x75, 0x77, 0xf1, + 0xc7, 0x4a, 0x7c, 0x10, 0xe2, 0x43, 0x2e, 0x3e, 0xdc, 0x5d, 0x4c, 0x77, 0xc5, 0x22, 0x17, 0x7f, + 0x07, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x78, 0xd9, 0x96, 0xd3, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go new file mode 100644 index 000000000..8dfdc45f0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go @@ -0,0 +1,104 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/dropped_labels.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A set of (label, value) pairs which were dropped during aggregation, attached +// to google.api.Distribution.Exemplars in google.api.Distribution values during +// aggregation. +// +// These values are used in combination with the label values that remain on the +// aggregated Distribution timeseries to construct the full label set for the +// exemplar values. The resulting full label set may be used to identify the +// specific task/job/instance (for example) which may be contributing to a +// long-tail, while allowing the storage savings of only storing aggregated +// distribution values for a large group. +// +// Note that there are no guarantees on ordering of the labels from +// exemplar-to-exemplar and from distribution-to-distribution in the same +// stream, and there may be duplicates. It is up to clients to resolve any +// ambiguities. +type DroppedLabels struct { + // Map from label to its value, for all labels dropped in any aggregation. 
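Aside, not part of the generated file: once decoded, the dropped labels described above are an ordinary Go map, as this short sketch shows; the label name and value are made up.

dl := &monitoringpb.DroppedLabels{
	Label: map[string]string{"instance_name": "gke-node-1234"}, // made-up dropped label
}
for k, v := range dl.GetLabel() {
	fmt.Printf("dropped %s=%s\n", k, v)
}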
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DroppedLabels) Reset() { *m = DroppedLabels{} } +func (m *DroppedLabels) String() string { return proto.CompactTextString(m) } +func (*DroppedLabels) ProtoMessage() {} +func (*DroppedLabels) Descriptor() ([]byte, []int) { + return fileDescriptor_15749142c06d7f43, []int{0} +} + +func (m *DroppedLabels) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DroppedLabels.Unmarshal(m, b) +} +func (m *DroppedLabels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DroppedLabels.Marshal(b, m, deterministic) +} +func (m *DroppedLabels) XXX_Merge(src proto.Message) { + xxx_messageInfo_DroppedLabels.Merge(m, src) +} +func (m *DroppedLabels) XXX_Size() int { + return xxx_messageInfo_DroppedLabels.Size(m) +} +func (m *DroppedLabels) XXX_DiscardUnknown() { + xxx_messageInfo_DroppedLabels.DiscardUnknown(m) +} + +var xxx_messageInfo_DroppedLabels proto.InternalMessageInfo + +func (m *DroppedLabels) GetLabel() map[string]string { + if m != nil { + return m.Label + } + return nil +} + +func init() { + proto.RegisterType((*DroppedLabels)(nil), "google.monitoring.v3.DroppedLabels") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.DroppedLabels.LabelEntry") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/dropped_labels.proto", fileDescriptor_15749142c06d7f43) +} + +var fileDescriptor_15749142c06d7f43 = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x4f, 0x29, 0xca, 0x2f, 0x28, 0x48, 0x4d, 0x89, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0x29, 0xd6, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd5, 0x43, 0x28, 0xd5, 0x2b, 0x33, 0x96, + 0x92, 0x81, 0x1a, 0x90, 0x58, 0x90, 0xa9, 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99, + 0x9f, 0x07, 0xd5, 0xa3, 0xd4, 0xcf, 0xc8, 0xc5, 0xeb, 0x02, 0x31, 0xcc, 0x07, 0x6c, 0x96, 0x90, + 0x0b, 0x17, 0x2b, 0xd8, 0x54, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x3d, 0x3d, 0x6c, 0xa6, + 0xea, 0xa1, 0xe8, 0xd1, 0x03, 0x53, 0xae, 0x79, 0x25, 0x45, 0x95, 0x41, 0x10, 0xcd, 0x52, 0x16, + 0x5c, 0x5c, 0x08, 0x41, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xce, 0x20, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, + 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, 0x39, 0x44, 0xd9, 0x41, 0x6d, 0x4c, 0xcf, 0xcf, 0x49, + 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f, 0xcd, 0x03, 0xbb, 0x57, 0x1f, 0x22, 0x95, + 0x58, 0x90, 0x59, 0x8c, 0x1a, 0x22, 0xd6, 0x08, 0x5e, 0x12, 0x1b, 0x58, 0xa9, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0x7e, 0x29, 0xf8, 0x00, 0x3b, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go new file mode 100644 index 000000000..b63597853 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go @@ -0,0 +1,157 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/group.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The description of a dynamic collection of monitored resources. Each group +// has a filter that is matched against monitored resources and their associated +// metadata. If a group's filter matches an available monitored resource, then +// that resource is a member of that group. Groups can contain any number of +// monitored resources, and each monitored resource can be a member of any +// number of groups. +// +// Groups can be nested in parent-child hierarchies. The `parentName` field +// identifies an optional parent for each group. If a group has a parent, then +// the only monitored resources available to be matched by the group's filter +// are the resources contained in the parent group. In other words, a group +// contains the monitored resources that match its filter and the filters of all +// the group's ancestors. A group without a parent can contain any monitored +// resource. +// +// For example, consider an infrastructure running a set of instances with two +// user-defined tags: `"environment"` and `"role"`. A parent group has a filter, +// `environment="production"`. A child of that parent group has a filter, +// `role="transcoder"`. The parent group contains all instances in the +// production environment, regardless of their roles. The child group contains +// instances that have the transcoder role *and* are in the production +// environment. +// +// The monitored resources contained in a group can change at any moment, +// depending on what resources exist and what filters are associated with the +// group and its ancestors. +type Group struct { + // Output only. The name of this group. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + // When creating a group, this field is ignored and a new name is created + // consisting of the project specified in the call to `CreateGroup` + // and a unique `{group_id}` that is generated automatically. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A user-assigned name for this group, used only for display purposes. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The name of the group's parent, if it has one. + // The format is `"projects/{project_id_or_number}/groups/{group_id}"`. + // For groups with no parent, `parentName` is the empty string, `""`. + ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"` + // The filter used to determine which monitored resources belong to this group. + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // If true, the members of this group are considered to be a cluster. + // The system can perform additional analysis on groups that are clusters. 
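Aside, not part of the generated file: a sketch mirroring the environment/role example in the comment above, with a parent group and a child whose members are further narrowed by the child's filter. The group names are placeholders (the server assigns real names on creation), and the filter strings are illustrative rather than verified filter syntax.

parent := &monitoringpb.Group{
	Name:        "projects/my-project/groups/100", // assigned by CreateGroup in practice
	DisplayName: "Production",
	Filter:      `environment="production"`, // placeholder filter syntax
}
child := &monitoringpb.Group{
	DisplayName: "Production transcoders",
	ParentName:  parent.GetName(), // only resources in the parent are candidates
	Filter:      `role="transcoder"`, // placeholder filter syntax
}
_ = child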
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_907e30c1f087271d, []int{0} +} + +func (m *Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Group.Unmarshal(m, b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return xxx_messageInfo_Group.Size(m) +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Group) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *Group) GetParentName() string { + if m != nil { + return m.ParentName + } + return "" +} + +func (m *Group) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *Group) GetIsCluster() bool { + if m != nil { + return m.IsCluster + } + return false +} + +func init() { + proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group") +} + +func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor_907e30c1f087271d) } + +var fileDescriptor_907e30c1f087271d = []byte{ + // 261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31, + 0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca, + 0x2c, 0xb2, 0x14, 0x5c, 0xb4, 0x8b, 0xae, 0x94, 0xd2, 0x45, 0x17, 0x32, 0x50, 0x62, 0x1b, 0x43, + 0x20, 0x93, 0x13, 0x92, 0x99, 0x82, 0x2f, 0xe2, 0x03, 0xb8, 0xf4, 0x51, 0x7c, 0x2a, 0x99, 0x93, + 0x91, 0x41, 0x70, 0x97, 0xf3, 0xfb, 0x3e, 0x72, 0xfe, 0x40, 0xa9, 0x11, 0xb5, 0x55, 0x55, 0x83, + 0xce, 0xb4, 0x18, 0x8c, 0xd3, 0xd5, 0x49, 0x54, 0x3a, 0x60, 0xe7, 0xb9, 0x0f, 0xd8, 0x62, 0x7e, + 0x91, 0x0c, 0x3e, 0x1a, 0xfc, 0x24, 0xee, 0xde, 0x19, 0x4c, 0xd7, 0xbd, 0x95, 0xe7, 0xf0, 0xdf, + 0xc9, 0x46, 0x15, 0xac, 0x64, 0x8b, 0xd9, 0x96, 0xde, 0xf9, 0x2d, 0x9c, 0x1f, 0x4d, 0xf4, 0x56, + 0xbe, 0xed, 0x89, 0x4d, 0x88, 0xcd, 0x87, 0xec, 0xa9, 0x57, 0x6e, 0x60, 0xee, 0x65, 0x50, 0xae, + 0x4d, 0xc6, 0x3f, 0x32, 0x20, 0x45, 0x24, 0x5c, 0x42, 0xf6, 0x6a, 0x6c, 0xab, 0x42, 0x31, 0x25, + 0x36, 0x54, 0xf9, 0x35, 0x80, 0x89, 0xfb, 0x83, 0xed, 0x62, 0xcf, 0xb2, 0x92, 0x2d, 0xce, 0xb6, + 0x33, 0x13, 0x57, 0x29, 0x58, 0x7e, 0x30, 0x28, 0x0e, 0xd8, 0xf0, 0xbf, 0xa6, 0x5e, 0x02, 0x8d, + 0xbc, 0xe9, 0xf7, 0xda, 0xb0, 0xe7, 0x87, 0xc1, 0xd1, 0x68, 0xa5, 0xd3, 0x1c, 0x83, 0xae, 0xb4, + 0x72, 0xb4, 0x75, 0x95, 0x90, 0xf4, 0x26, 0xfe, 0x3e, 0xcd, 0xfd, 0x58, 0x7d, 0x4e, 0xae, 0xd6, + 0xe9, 0x83, 0x95, 0xc5, 0xee, 0xc8, 0x1f, 0xc7, 0x56, 0x3b, 0xf1, 0xf5, 0x03, 0x6b, 0x82, 0xf5, + 0x08, 0xeb, 0x9d, 0x78, 0xc9, 0xa8, 0x89, 0xf8, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x95, 0xd1, 0xa1, + 0x34, 0x7e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
new file mode 100644
index 000000000..7a19cf7b0
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
@@ -0,0 +1,948 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/group_service.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ context "golang.org/x/net/context"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The `ListGroups` request.
+type ListGroupsRequest struct {
+ // The project whose groups are to be listed. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional filter consisting of a single group name. The filters limit the
+ // groups returned based on their parent-child relationship with the specified
+ // group. If no filter is specified, all groups are returned.
+ //
+ // Types that are valid to be assigned to Filter:
+ // *ListGroupsRequest_ChildrenOfGroup
+ // *ListGroupsRequest_AncestorsOfGroup
+ // *ListGroupsRequest_DescendantsOfGroup
+ Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} } +func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupsRequest) ProtoMessage() {} +func (*ListGroupsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{0} +} + +func (m *ListGroupsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsRequest.Unmarshal(m, b) +} +func (m *ListGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsRequest.Marshal(b, m, deterministic) +} +func (m *ListGroupsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsRequest.Merge(m, src) +} +func (m *ListGroupsRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupsRequest.Size(m) +} +func (m *ListGroupsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsRequest proto.InternalMessageInfo + +func (m *ListGroupsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type isListGroupsRequest_Filter interface { + isListGroupsRequest_Filter() +} + +type ListGroupsRequest_ChildrenOfGroup struct { + ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_AncestorsOfGroup struct { + AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"` +} + +type ListGroupsRequest_DescendantsOfGroup struct { + DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"` +} + +func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {} + +func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {} + +func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *ListGroupsRequest) GetChildrenOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok { + return x.ChildrenOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetAncestorsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok { + return x.AncestorsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetDescendantsOfGroup() string { + if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok { + return x.DescendantsOfGroup + } + return "" +} + +func (m *ListGroupsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ListGroupsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ListGroupsRequest_OneofMarshaler, _ListGroupsRequest_OneofUnmarshaler, _ListGroupsRequest_OneofSizer, []interface{}{ + (*ListGroupsRequest_ChildrenOfGroup)(nil), + (*ListGroupsRequest_AncestorsOfGroup)(nil), + (*ListGroupsRequest_DescendantsOfGroup)(nil), + } +} + +func _ListGroupsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.DescendantsOfGroup) + case nil: + default: + return fmt.Errorf("ListGroupsRequest.Filter has unexpected type %T", x) + } + return nil +} + +func _ListGroupsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ListGroupsRequest) + switch tag { + case 2: // filter.children_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_ChildrenOfGroup{x} + return true, err + case 3: // filter.ancestors_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_AncestorsOfGroup{x} + return true, err + case 4: // filter.descendants_of_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Filter = &ListGroupsRequest_DescendantsOfGroup{x} + return true, err + default: + return false, nil + } +} + +func _ListGroupsRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ListGroupsRequest) + // filter + switch x := m.Filter.(type) { + case *ListGroupsRequest_ChildrenOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ChildrenOfGroup))) + n += len(x.ChildrenOfGroup) + case *ListGroupsRequest_AncestorsOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.AncestorsOfGroup))) + n += len(x.AncestorsOfGroup) + case *ListGroupsRequest_DescendantsOfGroup: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.DescendantsOfGroup))) + n += len(x.DescendantsOfGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The `ListGroups` response. +type ListGroupsResponse struct { + // The groups that match the specified filters. + Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
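+ //
+ // Illustrative pagination sketch (not part of the generated docs): `client`
+ // is an assumed connected GroupServiceClient, `ctx` a context.Context, and
+ // "projects/my-project" a placeholder project name:
+ //
+ //     req := &ListGroupsRequest{Name: "projects/my-project", PageSize: 100}
+ //     for {
+ //         resp, err := client.ListGroups(ctx, req)
+ //         if err != nil {
+ //             break // handle the error appropriately
+ //         }
+ //         // ... consume resp.Group ...
+ //         if resp.NextPageToken == "" {
+ //             break // no more results
+ //         }
+ //         req.PageToken = resp.NextPageToken
+ //     }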
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} } +func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupsResponse) ProtoMessage() {} +func (*ListGroupsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{1} +} + +func (m *ListGroupsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupsResponse.Unmarshal(m, b) +} +func (m *ListGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupsResponse.Marshal(b, m, deterministic) +} +func (m *ListGroupsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupsResponse.Merge(m, src) +} +func (m *ListGroupsResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupsResponse.Size(m) +} +func (m *ListGroupsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupsResponse proto.InternalMessageInfo + +func (m *ListGroupsResponse) GetGroup() []*Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *ListGroupsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetGroup` request. +type GetGroupRequest struct { + // The group to retrieve. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} } +func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) } +func (*GetGroupRequest) ProtoMessage() {} +func (*GetGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{2} +} + +func (m *GetGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetGroupRequest.Unmarshal(m, b) +} +func (m *GetGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetGroupRequest.Marshal(b, m, deterministic) +} +func (m *GetGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetGroupRequest.Merge(m, src) +} +func (m *GetGroupRequest) XXX_Size() int { + return xxx_messageInfo_GetGroupRequest.Size(m) +} +func (m *GetGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetGroupRequest proto.InternalMessageInfo + +func (m *GetGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateGroup` request. +type CreateGroupRequest struct { + // The project in which to create the group. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // A group definition. It is an error to define the `name` field because + // the system assigns the name. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not create the group. 
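+ //
+ // Illustrative dry-run sketch (names are placeholders; `client` is an
+ // assumed GroupServiceClient and `ctx` a context.Context):
+ //
+ //     _, err := client.CreateGroup(ctx, &CreateGroupRequest{
+ //         Name:         "projects/my-project",
+ //         Group:        &Group{DisplayName: "prod", Filter: "environment=\"production\""},
+ //         ValidateOnly: true, // validate the request; no group is created
+ //     })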
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} } +func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*CreateGroupRequest) ProtoMessage() {} +func (*CreateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{3} +} + +func (m *CreateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateGroupRequest.Unmarshal(m, b) +} +func (m *CreateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateGroupRequest.Marshal(b, m, deterministic) +} +func (m *CreateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateGroupRequest.Merge(m, src) +} +func (m *CreateGroupRequest) XXX_Size() int { + return xxx_messageInfo_CreateGroupRequest.Size(m) +} +func (m *CreateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateGroupRequest proto.InternalMessageInfo + +func (m *CreateGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *CreateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `UpdateGroup` request. +type UpdateGroupRequest struct { + // The new definition of the group. All fields of the existing group, + // excepting `name`, are replaced with the corresponding fields of this group. + Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + // If true, validate this request but do not update the existing group. + ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} } +func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateGroupRequest) ProtoMessage() {} +func (*UpdateGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{4} +} + +func (m *UpdateGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateGroupRequest.Unmarshal(m, b) +} +func (m *UpdateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateGroupRequest.Marshal(b, m, deterministic) +} +func (m *UpdateGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateGroupRequest.Merge(m, src) +} +func (m *UpdateGroupRequest) XXX_Size() int { + return xxx_messageInfo_UpdateGroupRequest.Size(m) +} +func (m *UpdateGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateGroupRequest proto.InternalMessageInfo + +func (m *UpdateGroupRequest) GetGroup() *Group { + if m != nil { + return m.Group + } + return nil +} + +func (m *UpdateGroupRequest) GetValidateOnly() bool { + if m != nil { + return m.ValidateOnly + } + return false +} + +// The `DeleteGroup` request. You can only delete a group if it has no children. +type DeleteGroupRequest struct { + // The group to delete. 
The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} } +func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteGroupRequest) ProtoMessage() {} +func (*DeleteGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{5} +} + +func (m *DeleteGroupRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteGroupRequest.Unmarshal(m, b) +} +func (m *DeleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteGroupRequest.Marshal(b, m, deterministic) +} +func (m *DeleteGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteGroupRequest.Merge(m, src) +} +func (m *DeleteGroupRequest) XXX_Size() int { + return xxx_messageInfo_DeleteGroupRequest.Size(m) +} +func (m *DeleteGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteGroupRequest proto.InternalMessageInfo + +func (m *DeleteGroupRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListGroupMembers` request. +type ListGroupMembersRequest struct { + // The group whose members are listed. The format is + // `"projects/{project_id_or_number}/groups/{group_id}"`. + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // An optional [list filter](/monitoring/api/learn_more#filtering) describing + // the members to be returned. The filter may reference the type, labels, and + // metadata of monitored resources that comprise the group. + // For example, to return only resources representing Compute Engine VM + // instances, use this filter: + // + // resource.type = "gce_instance" + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` + // An optional time interval for which results should be returned. Only + // members that were part of the group during the specified interval are + // included in the response. If no interval is provided then the group + // membership over the last minute is returned. 
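+ //
+ // Illustrative sketch (`name` is a group name and `iv` an assumed
+ // *TimeInterval built elsewhere): list only the Compute Engine VM instances
+ // that were members during that interval:
+ //
+ //     req := &ListGroupMembersRequest{
+ //         Name:     name,
+ //         Filter:   "resource.type = \"gce_instance\"",
+ //         Interval: iv,
+ //     }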
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} } +func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersRequest) ProtoMessage() {} +func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{6} +} + +func (m *ListGroupMembersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersRequest.Unmarshal(m, b) +} +func (m *ListGroupMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersRequest.Marshal(b, m, deterministic) +} +func (m *ListGroupMembersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersRequest.Merge(m, src) +} +func (m *ListGroupMembersRequest) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersRequest.Size(m) +} +func (m *ListGroupMembersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersRequest proto.InternalMessageInfo + +func (m *ListGroupMembersRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListGroupMembersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListGroupMembersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListGroupMembersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListGroupMembersRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +// The `ListGroupMembers` response. +type ListGroupMembersResponse struct { + // A set of monitored resources in the group. + Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` + // If there are more results than have been returned, then this field is + // set to a non-empty value. To see the additional results, use that value as + // `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of elements matching this request. 
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} } +func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) } +func (*ListGroupMembersResponse) ProtoMessage() {} +func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_21ad21d0ed55c55a, []int{7} +} + +func (m *ListGroupMembersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListGroupMembersResponse.Unmarshal(m, b) +} +func (m *ListGroupMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListGroupMembersResponse.Marshal(b, m, deterministic) +} +func (m *ListGroupMembersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListGroupMembersResponse.Merge(m, src) +} +func (m *ListGroupMembersResponse) XXX_Size() int { + return xxx_messageInfo_ListGroupMembersResponse.Size(m) +} +func (m *ListGroupMembersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListGroupMembersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListGroupMembersResponse proto.InternalMessageInfo + +func (m *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource { + if m != nil { + return m.Members + } + return nil +} + +func (m *ListGroupMembersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListGroupMembersResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func init() { + proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest") + proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse") + proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest") + proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest") + proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest") + proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest") + proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest") + proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor_21ad21d0ed55c55a) +} + +var fileDescriptor_21ad21d0ed55c55a = []byte{ + // 826 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7, + 0x83, 0xa8, 0x50, 0x5b, 0x24, 0x07, 0x24, 0x10, 0x3d, 0xb4, 0xa0, 0x82, 0x44, 0xd5, 0xca, 0x2d, + 0x3d, 0xa0, 0x4a, 0x91, 0x9b, 0x4c, 0x8c, 0xc1, 0xde, 0x35, 0xf6, 0x26, 0xd0, 0xa2, 0x4a, 0x80, + 0xc4, 0x81, 0x33, 0x37, 0x6e, 0x1c, 0xe1, 0x2f, 0x70, 0xe2, 0xca, 0x95, 0xbf, 0xc0, 0xff, 0x00, + 0x79, 0xbd, 0x9b, 0x38, 0x9f, 0xed, 0x85, 0x5b, 0xb2, 0xf3, 0x8c, 0x9f, 0x67, 0x66, 0x9f, 0x99, + 0x45, 0x25, 0x9b, 0x52, 0xdb, 0x05, 0xc3, 0xa3, 0xc4, 0x61, 0x34, 0x70, 0x88, 0x6d, 0xb4, 0x2a, + 0x86, 0x1d, 0xd0, 0xa6, 0x5f, 0x0d, 0x21, 0x68, 0x39, 0x35, 0xd0, 0xfd, 0x80, 0x32, 0x8a, 0xe7, + 0x62, 0xa4, 0xde, 0x41, 0xea, 0xad, 0x8a, 0xba, 0x28, 0xf2, 0x2d, 0xdf, 
0x31, 0x2c, 0x42, 0x28, + 0xb3, 0x98, 0x43, 0x49, 0x18, 0xe7, 0xa8, 0x2b, 0x89, 0xa8, 0xc8, 0x83, 0x7a, 0x35, 0x80, 0x90, + 0x36, 0x03, 0xf9, 0x61, 0xf5, 0xda, 0x40, 0x09, 0x35, 0xea, 0x79, 0x94, 0x08, 0x48, 0x71, 0xb8, + 0x4a, 0x81, 0x58, 0x10, 0x08, 0xfe, 0xef, 0xb4, 0xd9, 0x30, 0xc0, 0xf3, 0xd9, 0x59, 0x1c, 0xd4, + 0xfe, 0x28, 0x68, 0xf6, 0xb1, 0x13, 0xb2, 0xdd, 0x28, 0x21, 0x34, 0xe1, 0x65, 0x13, 0x42, 0x86, + 0x31, 0x4a, 0x13, 0xcb, 0x83, 0xc2, 0x44, 0x51, 0x29, 0xe5, 0x4c, 0xfe, 0x1b, 0xdf, 0x44, 0xb3, + 0xb5, 0x67, 0x8e, 0x5b, 0x0f, 0x80, 0x54, 0x69, 0xa3, 0xca, 0x19, 0x0a, 0x63, 0x11, 0xe0, 0xe1, + 0x7f, 0xe6, 0xb4, 0x0c, 0xed, 0x37, 0xf8, 0x97, 0xb0, 0x8e, 0xb0, 0x45, 0x6a, 0x10, 0x32, 0x1a, + 0x84, 0x1d, 0x78, 0x4a, 0xc0, 0x67, 0xda, 0x31, 0x89, 0x2f, 0xa3, 0xb9, 0x3a, 0x84, 0x35, 0x20, + 0x75, 0x8b, 0xb0, 0x44, 0x46, 0x5a, 0x64, 0xe0, 0x44, 0x54, 0xe6, 0x2c, 0xa0, 0x9c, 0x6f, 0xd9, + 0x50, 0x0d, 0x9d, 0x73, 0x28, 0x8c, 0x17, 0x95, 0xd2, 0xb8, 0x99, 0x8d, 0x0e, 0x0e, 0x9d, 0x73, + 0xc0, 0x4b, 0x08, 0xf1, 0x20, 0xa3, 0x2f, 0x80, 0x14, 0x32, 0xbc, 0x10, 0x0e, 0x3f, 0x8a, 0x0e, + 0xb6, 0xb3, 0x28, 0xd3, 0x70, 0x5c, 0x06, 0x81, 0x46, 0x11, 0x4e, 0x36, 0x20, 0xf4, 0x29, 0x09, + 0x01, 0xdf, 0x42, 0xe3, 0xb1, 0x00, 0xa5, 0x98, 0x2a, 0xe5, 0xcb, 0x0b, 0xfa, 0xa0, 0x2b, 0xd6, + 0x79, 0x92, 0x19, 0x23, 0xf1, 0x3a, 0x9a, 0x26, 0xf0, 0x9a, 0x55, 0x13, 0xb4, 0xbc, 0x3d, 0xe6, + 0x54, 0x74, 0x7c, 0x20, 0xa9, 0xb5, 0x35, 0x34, 0xbd, 0x0b, 0x31, 0x5f, 0x6f, 0xbf, 0x53, 0x9d, + 0x7e, 0x6b, 0x6f, 0x15, 0x84, 0x77, 0x02, 0xb0, 0x18, 0x0c, 0x84, 0xa6, 0x13, 0x57, 0xd3, 0x16, + 0x1b, 0xf1, 0x5d, 0x4d, 0xec, 0x0a, 0x9a, 0x6a, 0x59, 0xae, 0x53, 0xb7, 0x18, 0x54, 0x29, 0x71, + 0xcf, 0x38, 0x75, 0xd6, 0x9c, 0x94, 0x87, 0xfb, 0xc4, 0x3d, 0xd3, 0x5c, 0x84, 0x9f, 0xf8, 0xf5, + 0x5e, 0x05, 0xff, 0x8a, 0xad, 0x84, 0xf0, 0x7d, 0x70, 0x61, 0x48, 0xbd, 0xc9, 0xd6, 0xfc, 0x50, + 0xd0, 0xff, 0xed, 0x3b, 0xdb, 0x03, 0xef, 0x14, 0x82, 0x91, 0xd6, 0xed, 0x32, 0x4a, 0x6a, 0xa4, + 0x51, 0xd2, 0x3d, 0x46, 0xc1, 0xf3, 0xd2, 0x28, 0xdc, 0x61, 0x39, 0x53, 0xfc, 0xc3, 0x5b, 0x28, + 0xeb, 0x10, 0x06, 0x41, 0xcb, 0x72, 0xb9, 0xbb, 0xf2, 0x65, 0x6d, 0x70, 0x23, 0x8e, 0x1c, 0x0f, + 0x1e, 0x09, 0xa4, 0xd9, 0xce, 0xd1, 0x3e, 0x2b, 0xa8, 0xd0, 0x5f, 0x83, 0x70, 0xdf, 0x6d, 0x34, + 0xe1, 0xc5, 0x47, 0xc2, 0x7f, 0x4b, 0xf2, 0xdb, 0x96, 0xef, 0xe8, 0x7b, 0x72, 0x5d, 0x98, 0x62, + 0x5b, 0x98, 0x12, 0x7d, 0x55, 0x0f, 0x46, 0x45, 0x33, 0xca, 0x2c, 0x37, 0xd9, 0x92, 0x1c, 0x3f, + 0x89, 0x7a, 0x52, 0xfe, 0x9e, 0x41, 0x93, 0x5c, 0xd8, 0x61, 0xbc, 0xe7, 0xf0, 0x07, 0x05, 0xa1, + 0xce, 0x94, 0xe0, 0xeb, 0x83, 0x4b, 0xed, 0x5b, 0x24, 0x6a, 0xe9, 0x72, 0x60, 0x5c, 0xb2, 0xb6, + 0xfa, 0xfe, 0xd7, 0xef, 0x4f, 0x63, 0xcb, 0x78, 0x31, 0x5a, 0x5f, 0x6f, 0xa2, 0x6b, 0xbb, 0xe7, + 0x07, 0xf4, 0x39, 0xd4, 0x58, 0x68, 0x6c, 0x5c, 0xc4, 0x0b, 0x2d, 0xc4, 0x2d, 0x94, 0x95, 0xb3, + 0x83, 0xd7, 0x86, 0x18, 0xaf, 0x7b, 0xb6, 0xd4, 0x51, 0xfe, 0xd4, 0xd6, 0x39, 0x6b, 0x11, 0x2f, + 0x0f, 0x62, 0x15, 0xa4, 0xc6, 0xc6, 0x05, 0x7e, 0xa7, 0xa0, 0x7c, 0x62, 0x18, 0xf1, 0x90, 0xba, + 0xfa, 0xe7, 0x75, 0x34, 0xfd, 0x0d, 0x4e, 0xbf, 0xa6, 0x8d, 0x2c, 0xfa, 0x8e, 0x18, 0xa2, 0x8f, + 0x0a, 0xca, 0x27, 0xc6, 0x71, 0x98, 0x86, 0xfe, 0x89, 0x1d, 0xad, 0xa1, 0xc2, 0x35, 0x6c, 0xaa, + 0xab, 0x5c, 0x43, 0xfc, 0x70, 0x0c, 0x6d, 0x84, 0xd4, 0xf2, 0x0a, 0xe5, 0x13, 0xb3, 0x3a, 0x4c, + 0x4a, 0xff, 0x38, 0xab, 0xf3, 0x12, 0x29, 0x5f, 0x23, 0xfd, 0x41, 0xf4, 0x1a, 0xc9, 0x8b, 0xd8, + 0xb8, 0xec, 0x22, 0xbe, 0x28, 0x68, 0xa6, 0x77, 0x6c, 0xf0, 0xe6, 0x25, 0x2e, 0xeb, 0x5e, 0x11, + 
0xaa, 0x7e, 0x55, 0xb8, 0xb0, 0xa6, 0xce, 0xb5, 0x95, 0xf0, 0xfa, 0x68, 0x6d, 0x86, 0x18, 0xc2, + 0xed, 0xaf, 0x0a, 0x2a, 0xd4, 0xa8, 0x37, 0x90, 0x65, 0x7b, 0x36, 0x39, 0x57, 0x07, 0x51, 0x13, + 0x0e, 0x94, 0xa7, 0x5b, 0x02, 0x6a, 0x53, 0xd7, 0x22, 0xb6, 0x4e, 0x03, 0xdb, 0xb0, 0x81, 0xf0, + 0x16, 0x19, 0x71, 0xc8, 0xf2, 0x9d, 0xb0, 0xfb, 0x8d, 0xbf, 0xdb, 0xf9, 0xf7, 0x6d, 0x4c, 0xdd, + 0x8d, 0x3f, 0xb0, 0xe3, 0xd2, 0x66, 0x5d, 0x2e, 0x88, 0x88, 0xf1, 0xb8, 0xf2, 0x53, 0x06, 0x4f, + 0x78, 0xf0, 0xa4, 0x13, 0x3c, 0x39, 0xae, 0x9c, 0x66, 0x38, 0x49, 0xe5, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x86, 0x94, 0xf2, 0xde, 0xed, 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GroupServiceClient is the client API for GroupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GroupServiceClient interface { + // Lists the existing groups. + ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Creates a new group. + CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) + // Deletes an existing group. + DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) +} + +type groupServiceClient struct { + cc *grpc.ClientConn +} + +func NewGroupServiceClient(cc *grpc.ClientConn) GroupServiceClient { + return &groupServiceClient{cc} +} + +func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) { + out := new(ListGroupsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) { + out := new(Group) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) { + out := new(ListGroupMembersResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupServiceServer is the server API for GroupService service. +type GroupServiceServer interface { + // Lists the existing groups. + ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) + // Gets a single group. + GetGroup(context.Context, *GetGroupRequest) (*Group, error) + // Creates a new group. + CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) + // Updates an existing group. + // You can change any group attributes except `name`. + UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) + // Deletes an existing group. + DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error) + // Lists the monitored resources that are members of a group. + ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) +} + +func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) { + s.RegisterService(&_GroupService_serviceDesc, srv) +} + +func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroups(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroups", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).GetGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/GetGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).CreateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/CreateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) 
{ + return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).UpdateGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).DeleteGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListGroupMembersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupServiceServer).ListGroupMembers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _GroupService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.GroupService", + HandlerType: (*GroupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListGroups", + Handler: _GroupService_ListGroups_Handler, + }, + { + MethodName: "GetGroup", + Handler: _GroupService_GetGroup_Handler, + }, + { + MethodName: "CreateGroup", + Handler: _GroupService_CreateGroup_Handler, + }, + { + MethodName: "UpdateGroup", + Handler: _GroupService_UpdateGroup_Handler, + }, + { + MethodName: "DeleteGroup", + Handler: _GroupService_DeleteGroup_Handler, + }, + { + MethodName: "ListGroupMembers", + Handler: _GroupService_ListGroupMembers_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/group_service.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go new file mode 100644 index 000000000..e4929901d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/metric.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + _ "google.golang.org/genproto/googleapis/api/label" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A single data point in a time series. +type Point struct { + // The time interval to which the data point applies. For `GAUGE` metrics, + // only the end time of the interval is used. For `DELTA` metrics, the start + // and end time should specify a non-zero interval, with subsequent points + // specifying contiguous and non-overlapping intervals. For `CUMULATIVE` + // metrics, the start and end time should specify a non-zero interval, with + // subsequent points specifying the same start time and increasing end times, + // until an event resets the cumulative value to zero and sets a new start + // time for the following points. + Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"` + // The value of the data point. + Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_c76199a3d2c4c21e, []int{0} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *Point) GetValue() *TypedValue { + if m != nil { + return m.Value + } + return nil +} + +// A collection of data points that describes the time-varying values +// of a metric. A time series is identified by a combination of a +// fully-specified monitored resource and a fully-specified metric. +// This type is used for both listing and creating time series. +type TimeSeries struct { + // The associated metric. A fully-specified metric used to identify the time + // series. + Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"` + // The associated monitored resource. Custom metrics can use only certain + // monitored resource types in their time series data. 
+ Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Output only. The associated monitored resource metadata. When reading a
+ // timeseries, this field will include metadata labels that are explicitly
+ // named in the reduction. When creating a timeseries, this field is ignored.
+ Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The metric kind of the time series. When listing time series, this metric
+ // kind might be different from the metric kind of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the metric kind of the associated metric. If the associated
+ // metric's descriptor must be auto-created, then this field specifies the
+ // metric kind of the new descriptor and must be either `GAUGE` (the default)
+ // or `CUMULATIVE`.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The value type of the time series. When listing time series, this value
+ // type might be different from the value type of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the type of the data in the `points` field.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The data points of this time series. When listing time series, points are
+ // returned in reverse time order.
+ //
+ // When creating a time series, this field must contain exactly one point and
+ // the point's type must be the same as the value type of the associated
+ // metric. If the associated metric's descriptor must be auto-created, then
+ // the value type of the descriptor is determined by the point's type, which
+ // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
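+ //
+ // Illustrative sketch of the one-point rule when writing data (the metric
+ // type is a placeholder; `iv` and `val` are assumed *TimeInterval and
+ // *TypedValue values built elsewhere):
+ //
+ //     series := &TimeSeries{
+ //         Metric: &metric.Metric{Type: "custom.googleapis.com/my/metric"},
+ //         Points: []*Point{{Interval: iv, Value: val}},
+ //     }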
+ Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_c76199a3d2c4c21e, []int{1} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetMetric() *metric.Metric { + if m != nil { + return m.Metric + } + return nil +} + +func (m *TimeSeries) GetResource() *monitoredres.MonitoredResource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind { + if m != nil { + return m.MetricKind + } + return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED +} + +func (m *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType { + if m != nil { + return m.ValueType + } + return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +func init() { + proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point") + proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries") +} + +func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor_c76199a3d2c4c21e) } + +var fileDescriptor_c76199a3d2c4c21e = []byte{ + // 441 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x51, 0xab, 0xd3, 0x30, + 0x14, 0xc7, 0xe9, 0xae, 0x9b, 0x33, 0x03, 0x1f, 0x82, 0x68, 0x99, 0x0a, 0x73, 0xa2, 0x0e, 0x1f, + 0x5a, 0x58, 0x41, 0x10, 0xe1, 0x82, 0x57, 0x45, 0x45, 0x2e, 0x8c, 0x28, 0x7b, 0x90, 0xc1, 0xc8, + 0x6d, 0x0f, 0x25, 0xd8, 0xe4, 0x84, 0x34, 0x2b, 0xdc, 0x27, 0x3f, 0x8c, 0x6f, 0x7e, 0x14, 0x3f, + 0x93, 0x0f, 0xd2, 0x24, 0xdd, 0x76, 0xb1, 0xf7, 0xbe, 0xb5, 0xf9, 0xff, 0xfe, 0xe7, 0x7f, 0x72, + 0x72, 0xc8, 0x93, 0x12, 0xb1, 0xac, 0x20, 0x95, 0xa8, 0x84, 0x45, 0x23, 0x54, 0x99, 0x36, 0x59, + 0x2a, 0xc1, 0x1a, 0x91, 0x27, 0xda, 0xa0, 0x45, 0x7a, 0xcf, 0x23, 0xc9, 0x01, 0x49, 0x9a, 0x6c, + 0xfa, 0x28, 0x18, 0xb9, 0x16, 0x29, 0x57, 0x0a, 0x2d, 0xb7, 0x02, 0x55, 0xed, 0x3d, 0xd3, 0xfb, + 0x47, 0x6a, 0xc5, 0x2f, 0xa0, 0x0a, 0xe7, 0x0f, 0x8e, 0xce, 0x8f, 0x43, 0xa6, 0x4f, 0x8f, 0x05, + 0x1f, 0x04, 0xc5, 0xd6, 0x40, 0x8d, 0x3b, 0x93, 0x43, 0x80, 0xfa, 0x9b, 0xcd, 0x51, 0x4a, 0x54, + 0x1e, 0x99, 0xff, 0x24, 0xc3, 0x15, 0x0a, 0x65, 0xe9, 0x29, 0x19, 0x0b, 0x65, 0xc1, 0x34, 0xbc, + 0x8a, 0xa3, 0x59, 0xb4, 0x98, 0x2c, 0xe7, 0x49, 0xdf, 0x45, 0x92, 0x6f, 0x42, 0xc2, 0xe7, 0x40, + 0xb2, 0xbd, 0x87, 0xbe, 0x22, 0xc3, 0x86, 0x57, 0x3b, 0x88, 0x07, 0xce, 0x3c, 0xbb, 0xc6, 0x7c, + 0xa9, 
0xa1, 0x58, 0xb7, 0x1c, 0xf3, 0xf8, 0xfc, 0xef, 0x80, 0x90, 0xb6, 0xe4, 0x57, 0x30, 0x02, + 0x6a, 0xfa, 0x92, 0x8c, 0xfc, 0x3d, 0x43, 0x13, 0xb4, 0xab, 0xc3, 0xb5, 0x48, 0xce, 0x9d, 0xc2, + 0x02, 0x41, 0x5f, 0x93, 0x71, 0x77, 0xe1, 0x90, 0xfa, 0xf8, 0x0a, 0xdd, 0x8d, 0x85, 0x05, 0x88, + 0xed, 0x71, 0xfa, 0x96, 0x8c, 0x25, 0x58, 0x5e, 0x70, 0xcb, 0xe3, 0xdb, 0xce, 0xfa, 0xec, 0x46, + 0xeb, 0x79, 0x80, 0xd9, 0xde, 0x46, 0x3f, 0x91, 0x89, 0xef, 0x63, 0xfb, 0x43, 0xa8, 0x22, 0x3e, + 0x99, 0x45, 0x8b, 0xbb, 0xcb, 0x17, 0xff, 0xb7, 0xfb, 0x1e, 0xea, 0xdc, 0x08, 0x6d, 0xd1, 0x84, + 0x83, 0x2f, 0x42, 0x15, 0x8c, 0xc8, 0xfd, 0x37, 0xfd, 0x40, 0x88, 0x9b, 0xc5, 0xd6, 0x5e, 0x6a, + 0x88, 0x6f, 0xb9, 0x42, 0xcf, 0x6f, 0x2c, 0xe4, 0x26, 0xd8, 0xce, 0x92, 0xdd, 0x69, 0xba, 0x4f, + 0x9a, 0x91, 0x91, 0x6e, 0x9f, 0xb2, 0x8e, 0x87, 0xb3, 0x93, 0xc5, 0x64, 0xf9, 0xb0, 0xff, 0x09, + 0xdc, 0x73, 0xb3, 0x80, 0x9e, 0xfd, 0x8a, 0x48, 0x9c, 0xa3, 0xec, 0x45, 0xcf, 0x26, 0x3e, 0x78, + 0xd5, 0x6e, 0xca, 0x2a, 0xfa, 0x7e, 0x1a, 0xa0, 0x12, 0x2b, 0xae, 0xca, 0x04, 0x4d, 0x99, 0x96, + 0xa0, 0xdc, 0x1e, 0xa5, 0x5e, 0xe2, 0x5a, 0xd4, 0x57, 0xb7, 0xed, 0xcd, 0xe1, 0xef, 0xf7, 0x60, + 0xfa, 0xd1, 0x17, 0x78, 0x57, 0xe1, 0xae, 0xe8, 0x86, 0xdc, 0x66, 0xad, 0xb3, 0x3f, 0x9d, 0xb8, + 0x71, 0xe2, 0xe6, 0x20, 0x6e, 0xd6, 0xd9, 0xc5, 0xc8, 0x85, 0x64, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x5a, 0x88, 0xc9, 0x0b, 0x7e, 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go new file mode 100644 index 000000000..080dced64 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go @@ -0,0 +1,1222 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/metric_service.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + _ "github.com/golang/protobuf/ptypes/duration" + empty "github.com/golang/protobuf/ptypes/empty" + context "golang.org/x/net/context" + _ "google.golang.org/genproto/googleapis/api/annotations" + metric "google.golang.org/genproto/googleapis/api/metric" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + status "google.golang.org/genproto/googleapis/rpc/status" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Controls which fields are returned by `ListTimeSeries`. +type ListTimeSeriesRequest_TimeSeriesView int32 + +const ( + // Returns the identity of the metric(s), the time series, + // and the time series data. + ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0 + // Returns the identity of the metric and the time series resource, + // but not the time series data. 
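+ //
+ // For example (sketch; `req` is an assumed *ListTimeSeriesRequest, defined
+ // later in this file), a caller that only needs to discover which series
+ // exist can request this cheaper view:
+ //
+ //     req.View = ListTimeSeriesRequest_HEADERS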
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1 +) + +var ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{ + 0: "FULL", + 1: "HEADERS", +} + +var ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{ + "FULL": 0, + "HEADERS": 1, +} + +func (x ListTimeSeriesRequest_TimeSeriesView) String() string { + return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x)) +} + +func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{8, 0} +} + +// The `ListMonitoredResourceDescriptors` request. +type ListMonitoredResourceDescriptorsRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // An optional [filter](/monitoring/api/v3/filters) describing + // the descriptors to be returned. The filter can reference + // the descriptor's type and labels. For example, the + // following filter returns only Google Compute Engine descriptors + // that have an `id` label: + // + // resource.type = starts_with("gce_") AND resource.label:id + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsRequest) Reset() { + *m = ListMonitoredResourceDescriptorsRequest{} +} +func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{0} +} + +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Marshal(b, m, deterministic) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Merge(m, src) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Size(m) +} +func (m *ListMonitoredResourceDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m 
*ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMonitoredResourceDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMonitoredResourceDescriptors` response. +type ListMonitoredResourceDescriptorsResponse struct { + // The monitored resource descriptors that are available to this project + // and that match `filter`, if present. + ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMonitoredResourceDescriptorsResponse) Reset() { + *m = ListMonitoredResourceDescriptorsResponse{} +} +func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {} +func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{1} +} + +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Marshal(b, m, deterministic) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Merge(m, src) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Size(m) +} +func (m *ListMonitoredResourceDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMonitoredResourceDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor { + if m != nil { + return m.ResourceDescriptors + } + return nil +} + +func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMonitoredResourceDescriptor` request. +type GetMonitoredResourceDescriptorRequest struct { + // The monitored resource descriptor to get. The format is + // `"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"`. + // The `{resource_type}` is a predefined type, such as + // `cloudsql_database`. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonitoredResourceDescriptorRequest{} } +func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {} +func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{2} +} + +func (m *GetMonitoredResourceDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Merge(m, src) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Size(m) +} +func (m *GetMonitoredResourceDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMonitoredResourceDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMonitoredResourceDescriptorRequest proto.InternalMessageInfo + +func (m *GetMonitoredResourceDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListMetricDescriptors` request. +type ListMetricDescriptorsRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If this field is empty, all custom and + // system-defined metric descriptors are returned. + // Otherwise, the [filter](/monitoring/api/v3/filters) + // specifies which metric descriptors are to be + // returned. For example, the following filter matches all + // [custom metrics](/monitoring/custom-metrics): + // + // metric.type = starts_with("custom.googleapis.com/") + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // A positive number that is the maximum number of results to return. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. 
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } +func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsRequest) ProtoMessage() {} +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{3} +} + +func (m *ListMetricDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsRequest.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsRequest.Marshal(b, m, deterministic) +} +func (m *ListMetricDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsRequest.Merge(m, src) +} +func (m *ListMetricDescriptorsRequest) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsRequest.Size(m) +} +func (m *ListMetricDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsRequest proto.InternalMessageInfo + +func (m *ListMetricDescriptorsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListMetricDescriptorsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListMetricDescriptorsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListMetricDescriptors` response. +type ListMetricDescriptorsResponse struct { + // The metric descriptors that are available to the project + // and that match the value of `filter`, if present. + MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. 
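// Illustrative sketch, not part of the generated code: the page_size,
// page_token, and next_page_token fields documented above implement the
// list-pagination pattern shared by every List* RPC in this service. A
// minimal sketch of draining all pages, assuming an already-dialed and
// authenticated *grpc.ClientConn named conn, a context.Context named ctx,
// this package imported as monitoringpb, a placeholder project ID, and the
// usual fmt import:
//
//	client := monitoringpb.NewMetricServiceClient(conn)
//	req := &monitoringpb.ListMetricDescriptorsRequest{
//		Name:     "projects/my-project", // placeholder
//		PageSize: 100,
//	}
//	for {
//		resp, err := client.ListMetricDescriptors(ctx, req)
//		if err != nil {
//			return err
//		}
//		for _, md := range resp.MetricDescriptors {
//			fmt.Println(md.GetType())
//		}
//		if resp.NextPageToken == "" {
//			break // last page
//		}
//		req.PageToken = resp.NextPageToken // feed the token back in
//	}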
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } +func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMetricDescriptorsResponse) ProtoMessage() {} +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{4} +} + +func (m *ListMetricDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMetricDescriptorsResponse.Unmarshal(m, b) +} +func (m *ListMetricDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMetricDescriptorsResponse.Marshal(b, m, deterministic) +} +func (m *ListMetricDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsResponse.Merge(m, src) +} +func (m *ListMetricDescriptorsResponse) XXX_Size() int { + return xxx_messageInfo_ListMetricDescriptorsResponse.Size(m) +} +func (m *ListMetricDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptors + } + return nil +} + +func (m *ListMetricDescriptorsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetMetricDescriptor` request. +type GetMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example value of `{metric_id}` is + // `"compute.googleapis.com/instance/disk/read_bytes_count"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} } +func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetMetricDescriptorRequest) ProtoMessage() {} +func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{5} +} + +func (m *GetMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *GetMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *GetMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMetricDescriptorRequest.Merge(m, src) +} +func (m *GetMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetMetricDescriptorRequest.Size(m) +} +func (m *GetMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetMetricDescriptorRequest proto.InternalMessageInfo + +func (m *GetMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateMetricDescriptor` request. 
+type CreateMetricDescriptorRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The new [custom metric](/monitoring/custom-metrics) + // descriptor. + MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} } +func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*CreateMetricDescriptorRequest) ProtoMessage() {} +func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{6} +} + +func (m *CreateMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *CreateMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *CreateMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateMetricDescriptorRequest.Merge(m, src) +} +func (m *CreateMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_CreateMetricDescriptorRequest.Size(m) +} +func (m *CreateMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateMetricDescriptorRequest proto.InternalMessageInfo + +func (m *CreateMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +// The `DeleteMetricDescriptor` request. +type DeleteMetricDescriptorRequest struct { + // The metric descriptor on which to execute the request. The format is + // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`. + // An example of `{metric_id}` is: + // `"custom.googleapis.com/my_test_metric"`. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} } +func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMetricDescriptorRequest) ProtoMessage() {} +func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{7} +} + +func (m *DeleteMetricDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Unmarshal(m, b) +} +func (m *DeleteMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *DeleteMetricDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMetricDescriptorRequest.Merge(m, src) +} +func (m *DeleteMetricDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMetricDescriptorRequest.Size(m) +} +func (m *DeleteMetricDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMetricDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMetricDescriptorRequest proto.InternalMessageInfo + +func (m *DeleteMetricDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `ListTimeSeries` request. +type ListTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // "projects/{project_id_or_number}". + Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"` + // A [monitoring filter](/monitoring/api/v3/filters) that specifies which time + // series should be returned. The filter must specify a single metric type, + // and can additionally specify metric labels and other information. For + // example: + // + // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND + // metric.label.instance_name = "my-instance-name" + // + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // The time interval for which results should be returned. Only time series + // that contain data points in the specified interval are included + // in the response. + Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"` + // By default, the raw time series data is returned. + // Use this field to combine multiple time series for different + // views of the data. + Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"` + // Unsupported: must be left blank. The points in each time series are + // returned in reverse time order. + OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Specifies which information is returned about the time series. + View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"` + // A positive number that is the maximum number of results to return. If + // `page_size` is empty or more than 100,000 results, the effective + // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the + // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is + // the maximum number of `TimeSeries` returned. 
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return additional results from the previous method call. + PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} } +func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesRequest) ProtoMessage() {} +func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{8} +} + +func (m *ListTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesRequest.Unmarshal(m, b) +} +func (m *ListTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (m *ListTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesRequest.Merge(m, src) +} +func (m *ListTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesRequest.Size(m) +} +func (m *ListTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesRequest proto.InternalMessageInfo + +func (m *ListTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListTimeSeriesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListTimeSeriesRequest) GetInterval() *TimeInterval { + if m != nil { + return m.Interval + } + return nil +} + +func (m *ListTimeSeriesRequest) GetAggregation() *Aggregation { + if m != nil { + return m.Aggregation + } + return nil +} + +func (m *ListTimeSeriesRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView { + if m != nil { + return m.View + } + return ListTimeSeriesRequest_FULL +} + +func (m *ListTimeSeriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListTimeSeriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListTimeSeries` response. +type ListTimeSeriesResponse struct { + // One or more time series that match the filter included in the request. + TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // If there are more results than have been returned, then this field is set + // to a non-empty value. To see the additional results, + // use that value as `pageToken` in the next call to this method. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Query execution errors that may have caused the time series data returned + // to be incomplete. 
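// Illustrative sketch, not part of the generated code: tying the request
// fields above to the response below, one ListTimeSeries call that asks for
// series identities only (HEADERS) and then inspects the partial-failure
// errors surfaced in execution_errors. client and ctx are assumed as in the
// earlier pagination sketch; TimeInterval's StartTime/EndTime fields are
// declared elsewhere in this package, outside this hunk:
//
//	now := ptypes.TimestampNow() // github.com/golang/protobuf/ptypes
//	resp, err := client.ListTimeSeries(ctx, &monitoringpb.ListTimeSeriesRequest{
//		Name:     "projects/my-project", // placeholder
//		Filter:   `metric.type = "compute.googleapis.com/instance/cpu/usage_time"`,
//		Interval: &monitoringpb.TimeInterval{EndTime: now},
//		View:     monitoringpb.ListTimeSeriesRequest_HEADERS, // identities, no points
//	})
//	if err != nil {
//		return err
//	}
//	for _, e := range resp.ExecutionErrors { // partial-result errors, per the field below
//		log.Printf("time series query error: %s", e.Message)
//	}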
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} } +func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListTimeSeriesResponse) ProtoMessage() {} +func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{9} +} + +func (m *ListTimeSeriesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListTimeSeriesResponse.Unmarshal(m, b) +} +func (m *ListTimeSeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListTimeSeriesResponse.Marshal(b, m, deterministic) +} +func (m *ListTimeSeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListTimeSeriesResponse.Merge(m, src) +} +func (m *ListTimeSeriesResponse) XXX_Size() int { + return xxx_messageInfo_ListTimeSeriesResponse.Size(m) +} +func (m *ListTimeSeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListTimeSeriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListTimeSeriesResponse proto.InternalMessageInfo + +func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *ListTimeSeriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status { + if m != nil { + return m.ExecutionErrors + } + return nil +} + +// The `CreateTimeSeries` request. +type CreateTimeSeriesRequest struct { + // The project on which to execute the request. The format is + // `"projects/{project_id_or_number}"`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The new data to be added to a list of time series. + // Adds at most one data point to each of several time series. The new data + // point must be more recent than any other point in its time series. Each + // `TimeSeries` value must fully specify a unique time series by supplying + // all label values for the metric and the monitored resource. 
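// Illustrative sketch, not part of the generated code: the comment above
// carries the two write-side rules (at most one point per series per call,
// and each point newer than anything already in its series). A sketch of
// writing one double-valued point to a custom metric, assuming metricpb and
// monitoredrespb alias the vendored googleapis/api/metric and
// googleapis/api/monitoredres packages; TimeSeries, Point, and TypedValue are
// declared elsewhere in this package, outside this hunk:
//
//	_, err := client.CreateTimeSeries(ctx, &monitoringpb.CreateTimeSeriesRequest{
//		Name: "projects/my-project", // placeholder
//		TimeSeries: []*monitoringpb.TimeSeries{{
//			Metric:   &metricpb.Metric{Type: "custom.googleapis.com/my_test_metric"},
//			Resource: &monitoredrespb.MonitoredResource{Type: "global"},
//			Points: []*monitoringpb.Point{{
//				Interval: &monitoringpb.TimeInterval{EndTime: ptypes.TimestampNow()},
//				Value: &monitoringpb.TypedValue{
//					Value: &monitoringpb.TypedValue_DoubleValue{DoubleValue: 3.14},
//				},
//			}},
//		}},
//	})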
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} } +func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesRequest) ProtoMessage() {} +func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{10} +} + +func (m *CreateTimeSeriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesRequest.Unmarshal(m, b) +} +func (m *CreateTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesRequest.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesRequest.Merge(m, src) +} +func (m *CreateTimeSeriesRequest) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesRequest.Size(m) +} +func (m *CreateTimeSeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesRequest proto.InternalMessageInfo + +func (m *CreateTimeSeriesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +// Describes the result of a failed request to write data to a time series. +type CreateTimeSeriesError struct { + // The time series, including the `Metric`, `MonitoredResource`, + // and `Point`s (including timestamp and value) that resulted + // in the error. This field provides all of the context that + // would be needed to retry the operation. + TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + // The status of the requested write operation. 
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} } +func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) } +func (*CreateTimeSeriesError) ProtoMessage() {} +func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) { + return fileDescriptor_7b3d47b45a293957, []int{11} +} + +func (m *CreateTimeSeriesError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateTimeSeriesError.Unmarshal(m, b) +} +func (m *CreateTimeSeriesError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateTimeSeriesError.Marshal(b, m, deterministic) +} +func (m *CreateTimeSeriesError) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateTimeSeriesError.Merge(m, src) +} +func (m *CreateTimeSeriesError) XXX_Size() int { + return xxx_messageInfo_CreateTimeSeriesError.Size(m) +} +func (m *CreateTimeSeriesError) XXX_DiscardUnknown() { + xxx_messageInfo_CreateTimeSeriesError.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateTimeSeriesError proto.InternalMessageInfo + +func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries { + if m != nil { + return m.TimeSeries + } + return nil +} + +func (m *CreateTimeSeriesError) GetStatus() *status.Status { + if m != nil { + return m.Status + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView", ListTimeSeriesRequest_TimeSeriesView_name, ListTimeSeriesRequest_TimeSeriesView_value) + proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsRequest") + proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsResponse") + proto.RegisterType((*GetMonitoredResourceDescriptorRequest)(nil), "google.monitoring.v3.GetMonitoredResourceDescriptorRequest") + proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "google.monitoring.v3.ListMetricDescriptorsRequest") + proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "google.monitoring.v3.ListMetricDescriptorsResponse") + proto.RegisterType((*GetMetricDescriptorRequest)(nil), "google.monitoring.v3.GetMetricDescriptorRequest") + proto.RegisterType((*CreateMetricDescriptorRequest)(nil), "google.monitoring.v3.CreateMetricDescriptorRequest") + proto.RegisterType((*DeleteMetricDescriptorRequest)(nil), "google.monitoring.v3.DeleteMetricDescriptorRequest") + proto.RegisterType((*ListTimeSeriesRequest)(nil), "google.monitoring.v3.ListTimeSeriesRequest") + proto.RegisterType((*ListTimeSeriesResponse)(nil), "google.monitoring.v3.ListTimeSeriesResponse") + proto.RegisterType((*CreateTimeSeriesRequest)(nil), "google.monitoring.v3.CreateTimeSeriesRequest") + proto.RegisterType((*CreateTimeSeriesError)(nil), "google.monitoring.v3.CreateTimeSeriesError") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor_7b3d47b45a293957) +} + +var fileDescriptor_7b3d47b45a293957 = []byte{ + // 1049 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0xe2, 0x34, 0x71, 0x9e, 0xd5, 0xd4, 0x9d, 0xb6, 0xae, 0xd9, 0x26, 0x95, 0xbb, 0xa8, + 0xc4, 0x75, 0xcb, 0x6e, 0x65, 0x57, 0x1c, 0x92, 0x26, 0x52, 0xfe, 0x51, 0x2a, 0x02, 0x8a, 
0xd6, + 0x25, 0x87, 0x2a, 0x92, 0xb5, 0xb1, 0xa7, 0xab, 0x01, 0xef, 0xce, 0x32, 0x3b, 0x76, 0x9b, 0xa2, + 0x70, 0xe0, 0xd0, 0x3b, 0x02, 0x24, 0xf8, 0x0a, 0x39, 0x80, 0xf8, 0x0a, 0x88, 0x13, 0x57, 0xce, + 0xdc, 0xf8, 0x0a, 0xdc, 0xd1, 0xce, 0xee, 0xc6, 0xf6, 0x7a, 0x77, 0x6d, 0x73, 0xe9, 0xcd, 0x3b, + 0xef, 0xcd, 0x7b, 0xbf, 0xf7, 0x9b, 0x79, 0xef, 0x37, 0x86, 0x7b, 0x16, 0x63, 0x56, 0x97, 0xe8, + 0x36, 0x73, 0xa8, 0x60, 0x9c, 0x3a, 0x96, 0xde, 0x6f, 0xe8, 0x36, 0x11, 0x9c, 0xb6, 0x5b, 0x1e, + 0xe1, 0x7d, 0xda, 0x26, 0x9a, 0xcb, 0x99, 0x60, 0xf8, 0x7a, 0xe0, 0xaa, 0x0d, 0x5c, 0xb5, 0x7e, + 0x43, 0x59, 0x09, 0x03, 0x98, 0x2e, 0xd5, 0x4d, 0xc7, 0x61, 0xc2, 0x14, 0x94, 0x39, 0x5e, 0xb0, + 0x47, 0xb9, 0x39, 0x64, 0x0d, 0x82, 0x86, 0x86, 0xf7, 0x86, 0x0d, 0x41, 0x40, 0xd2, 0x69, 0x71, + 0xe2, 0xb1, 0x1e, 0x8f, 0x32, 0x2a, 0x77, 0x12, 0xc1, 0xb5, 0x99, 0x6d, 0x33, 0x27, 0xd3, 0x65, + 0x24, 0xd5, 0xed, 0xd0, 0x45, 0x7e, 0x9d, 0xf4, 0x5e, 0xe8, 0x9d, 0x1e, 0x97, 0x20, 0x43, 0xfb, + 0xad, 0xb8, 0x9d, 0xd8, 0xae, 0x38, 0x8d, 0x15, 0xc0, 0xdd, 0xb6, 0xee, 0x09, 0x53, 0xf4, 0xc2, + 0xca, 0xd4, 0xef, 0x10, 0xac, 0x1d, 0x50, 0x4f, 0x7c, 0x1a, 0x81, 0x37, 0x42, 0xec, 0x7b, 0xc4, + 0x6b, 0x73, 0xea, 0x0a, 0xc6, 0x3d, 0x83, 0x7c, 0xd5, 0x23, 0x9e, 0xc0, 0x18, 0xe6, 0x1d, 0xd3, + 0x26, 0xe5, 0x4b, 0x15, 0x54, 0x5d, 0x32, 0xe4, 0x6f, 0x5c, 0x82, 0x85, 0x17, 0xb4, 0x2b, 0x08, + 0x2f, 0xcf, 0xc9, 0xd5, 0xf0, 0x0b, 0xdf, 0x82, 0x25, 0xd7, 0xb4, 0x48, 0xcb, 0xa3, 0xaf, 0x49, + 0x39, 0x57, 0x41, 0xd5, 0x4b, 0x46, 0xde, 0x5f, 0x68, 0xd2, 0xd7, 0x04, 0xaf, 0x02, 0x48, 0xa3, + 0x60, 0x5f, 0x12, 0xa7, 0x3c, 0x2f, 0x37, 0x4a, 0xf7, 0x67, 0xfe, 0x82, 0xfa, 0x0b, 0x82, 0xea, + 0x64, 0x4c, 0x9e, 0xcb, 0x1c, 0x8f, 0xe0, 0xe7, 0x70, 0x3d, 0xa2, 0xbb, 0xd5, 0x19, 0xd8, 0xcb, + 0xa8, 0x92, 0xab, 0x16, 0xea, 0x6b, 0x5a, 0x78, 0xda, 0xa6, 0x4b, 0xb5, 0x8c, 0x78, 0xc6, 0x35, + 0x3e, 0x9e, 0x03, 0xbf, 0x0f, 0x57, 0x1c, 0xf2, 0x4a, 0xb4, 0x86, 0xc0, 0x06, 0x55, 0x5e, 0xf6, + 0x97, 0x0f, 0x2f, 0x00, 0x6f, 0xc0, 0xdd, 0x27, 0x24, 0x0b, 0x6e, 0x9c, 0xc1, 0xdc, 0x80, 0x41, + 0xf5, 0x0d, 0x82, 0x15, 0x59, 0xad, 0x3c, 0xec, 0xb7, 0x48, 0xfb, 0x0f, 0x08, 0x56, 0x53, 0x80, + 0x84, 0x5c, 0x7f, 0x02, 0x38, 0x6c, 0xa9, 0x71, 0xa6, 0x57, 0x46, 0x98, 0x8e, 0x85, 0x30, 0xae, + 0xda, 0xf1, 0xa0, 0x53, 0x93, 0xfb, 0x10, 0x14, 0x9f, 0xdc, 0x78, 0xc4, 0x0c, 0x46, 0xbf, 0x81, + 0xd5, 0x5d, 0x4e, 0x4c, 0x41, 0x66, 0xd8, 0x84, 0x9f, 0xc2, 0xd5, 0xb1, 0xda, 0x24, 0xa0, 0x49, + 0xa5, 0x15, 0xe3, 0xa5, 0xa9, 0x0d, 0x58, 0xdd, 0x23, 0x5d, 0x32, 0x53, 0x7e, 0xf5, 0xa7, 0x1c, + 0xdc, 0xf0, 0xd9, 0x7f, 0x46, 0x6d, 0xd2, 0x24, 0x9c, 0x92, 0xb1, 0xf3, 0x87, 0x29, 0xce, 0x7f, + 0x0b, 0xf2, 0xd4, 0x11, 0x84, 0xf7, 0xcd, 0xae, 0x3c, 0xe0, 0x42, 0x5d, 0xd5, 0x92, 0xe6, 0x9d, + 0xe6, 0xa7, 0x79, 0x1a, 0x7a, 0x1a, 0x17, 0x7b, 0xf0, 0x2e, 0x14, 0x4c, 0xcb, 0xe2, 0xc4, 0x92, + 0x93, 0x45, 0x5e, 0xb9, 0x42, 0xfd, 0x4e, 0x72, 0x88, 0xed, 0x81, 0xa3, 0x31, 0xbc, 0x0b, 0xbf, + 0x0b, 0x79, 0xc6, 0x3b, 0x84, 0xb7, 0x4e, 0x4e, 0xcb, 0x0b, 0x12, 0xde, 0xa2, 0xfc, 0xde, 0x39, + 0xc5, 0x9f, 0xc1, 0x7c, 0x9f, 0x92, 0x97, 0xe5, 0xc5, 0x0a, 0xaa, 0x2e, 0xd7, 0xd7, 0x93, 0x03, + 0x27, 0xd2, 0xa0, 0x0d, 0x56, 0x8e, 0x28, 0x79, 0x69, 0xc8, 0x38, 0xa3, 0xf7, 0x3d, 0x9f, 0x79, + 0xdf, 0x97, 0xe2, 0xf7, 0x7d, 0x0d, 0x96, 0x47, 0x63, 0xe2, 0x3c, 0xcc, 0x7f, 0xf4, 0xf9, 0xc1, + 0x41, 0xf1, 0x1d, 0x5c, 0x80, 0xc5, 0x8f, 0xf7, 0xb7, 0xf7, 0xf6, 0x8d, 0x66, 0x11, 0xa9, 0xbf, + 0x23, 0x28, 0xc5, 0x31, 0x85, 0x1d, 0xb1, 0x0d, 0x05, 0x41, 0x6d, 0xe2, 0x4b, 0x0c, 0x25, 0x51, + 0x2b, 0x54, 0xd2, 
0x29, 0x0f, 0xb7, 0x83, 0xb8, 0xf8, 0x3d, 0x6d, 0x1f, 0xe0, 0x4d, 0x28, 0x92, + 0x57, 0xa4, 0xdd, 0xf3, 0x29, 0x6e, 0x11, 0xce, 0xfd, 0xd6, 0xcb, 0xc9, 0x7c, 0x38, 0xca, 0xc7, + 0xdd, 0xb6, 0xd6, 0x94, 0xd3, 0xdd, 0xb8, 0x72, 0xe1, 0xbb, 0x2f, 0x5d, 0x55, 0x17, 0x6e, 0x06, + 0x4d, 0x91, 0x7e, 0xc1, 0x86, 0xdb, 0x21, 0x56, 0xd8, 0xdc, 0xec, 0x85, 0xf9, 0x83, 0xed, 0x46, + 0x3c, 0xa5, 0x04, 0x33, 0xce, 0x1a, 0x9a, 0x99, 0xb5, 0x1a, 0x2c, 0x04, 0x3a, 0x16, 0xf6, 0x68, + 0x12, 0x07, 0xa1, 0x47, 0xfd, 0x5f, 0x80, 0xcb, 0x41, 0x2b, 0x36, 0x83, 0x97, 0x00, 0xfe, 0x1b, + 0x41, 0x65, 0x92, 0xc2, 0xe0, 0xcd, 0xf4, 0xdb, 0x39, 0x85, 0x5a, 0x2a, 0x5b, 0xff, 0x77, 0x7b, + 0x70, 0xb5, 0xd4, 0xf5, 0x6f, 0xff, 0xfa, 0xe7, 0xfb, 0xb9, 0x47, 0xb8, 0xee, 0xbf, 0x04, 0xbe, + 0xf6, 0x0f, 0x65, 0xd3, 0xe5, 0xec, 0x0b, 0xd2, 0x16, 0x9e, 0x5e, 0x3b, 0x1b, 0xbc, 0x36, 0x92, + 0xa0, 0xff, 0x81, 0xe0, 0x76, 0xb6, 0x22, 0xe1, 0x8d, 0x64, 0x78, 0x53, 0xe9, 0x98, 0x32, 0xad, + 0xac, 0xaa, 0x8f, 0x65, 0x11, 0x1f, 0xe2, 0x47, 0x49, 0x45, 0x64, 0xd6, 0xa0, 0xd7, 0xce, 0xf0, + 0x6f, 0x28, 0x98, 0x89, 0x63, 0x8a, 0x84, 0xeb, 0x19, 0xe4, 0xa6, 0xe8, 0xa8, 0xd2, 0x98, 0x69, + 0x4f, 0x78, 0x0a, 0xba, 0x2c, 0xe0, 0x1e, 0x5e, 0x4b, 0x39, 0x85, 0x31, 0x64, 0x3f, 0x23, 0xb8, + 0x96, 0xa0, 0x57, 0xf8, 0x61, 0x3a, 0xdf, 0xc9, 0x2a, 0xa1, 0x64, 0xca, 0x8e, 0x5a, 0x97, 0xc0, + 0x1e, 0xe0, 0x5a, 0x32, 0xb3, 0x71, 0x5c, 0x7a, 0xad, 0x76, 0x86, 0x7f, 0x45, 0x50, 0x4a, 0x56, + 0x46, 0x9c, 0x42, 0x4e, 0xa6, 0x8e, 0x4e, 0x40, 0xb8, 0x23, 0x11, 0x3e, 0x56, 0xa7, 0xa5, 0x6e, + 0x7d, 0x5c, 0x80, 0x7d, 0x36, 0x4b, 0xc9, 0x5a, 0x9a, 0x86, 0x38, 0x53, 0x79, 0x95, 0x52, 0xb4, + 0x29, 0x7a, 0x25, 0x6b, 0xfb, 0xfe, 0x2b, 0x39, 0x62, 0xb3, 0x36, 0x0b, 0x9b, 0x3f, 0x22, 0x58, + 0x1e, 0x95, 0x05, 0x7c, 0x7f, 0x06, 0x41, 0x53, 0x1e, 0x4c, 0xe7, 0x1c, 0x5e, 0xc4, 0xaa, 0x44, + 0xa8, 0xe2, 0x4a, 0x32, 0x9b, 0x43, 0xa3, 0xf1, 0x0d, 0x82, 0x62, 0x7c, 0xee, 0xe2, 0x0f, 0xb2, + 0xce, 0x77, 0x1c, 0x5b, 0x1a, 0x4f, 0xf7, 0x25, 0x8a, 0xbb, 0xea, 0x44, 0x14, 0xeb, 0xa8, 0xb6, + 0x73, 0x8e, 0xa0, 0xdc, 0x66, 0x76, 0x62, 0xe6, 0x1d, 0x3c, 0x32, 0x91, 0x0f, 0xfd, 0x34, 0x87, + 0xe8, 0xf9, 0x56, 0xe8, 0x6b, 0xb1, 0xae, 0xe9, 0x58, 0x1a, 0xe3, 0x96, 0x6e, 0x11, 0x47, 0x82, + 0xd0, 0x03, 0x93, 0xe9, 0x52, 0x6f, 0xf4, 0x6f, 0xd2, 0xc6, 0xe0, 0xeb, 0x7c, 0x4e, 0x79, 0x12, + 0x04, 0xd8, 0xed, 0xb2, 0x5e, 0x27, 0x1a, 0x4d, 0x7e, 0xca, 0xa3, 0xc6, 0x9f, 0x91, 0xf1, 0x58, + 0x1a, 0x8f, 0x07, 0xc6, 0xe3, 0xa3, 0xc6, 0xc9, 0x82, 0x4c, 0xd2, 0xf8, 0x2f, 0x00, 0x00, 0xff, + 0xff, 0x79, 0x2b, 0x3b, 0x90, 0x4a, 0x0e, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricServiceClient is the client API for MetricService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricServiceClient interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. 
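// Illustrative sketch, not part of the generated code: this raw gRPC client
// is the layer that the hand-written wrapper in
// cloud.google.com/go/monitoring/apiv3, vendored earlier in this patch,
// builds on. A hedged sketch of constructing it directly, where the endpoint
// is Stackdriver Monitoring's public one and creds is an assumed per-RPC
// credential such as oauth.TokenSource from
// google.golang.org/grpc/credentials/oauth:
//
//	conn, err := grpc.Dial("monitoring.googleapis.com:443",
//		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
//		grpc.WithPerRPCCredentials(creds),
//	)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//	client := NewMetricServiceClient(conn) // monitoringpb.NewMetricServiceClient from outside this package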
+ GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. + CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type metricServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricServiceClient(cc *grpc.ClientConn) MetricServiceClient { + return &metricServiceClient{cc} +} + +func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) { + out := new(ListMonitoredResourceDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) { + out := new(monitoredres.MonitoredResourceDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) { + out := new(metric.MetricDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) { + out := new(ListTimeSeriesResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricServiceServer is the server API for MetricService service. +type MetricServiceServer interface { + // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. + ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) + // Gets a single monitored resource descriptor. This method does not require a Stackdriver account. + GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) + // Lists metric descriptors that match a filter. This method does not require a Stackdriver account. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // Gets a single metric descriptor. This method does not require a Stackdriver account. + GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Creates a new metric descriptor. + // User-created metric descriptors define + // [custom metrics](/monitoring/custom-metrics). + CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) + // Deletes a metric descriptor. Only user-created + // [custom metrics](/monitoring/custom-metrics) can be deleted. + DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error) + // Lists time series that match a filter. This method does not require a Stackdriver account. + ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) + // Creates or adds data to one or more time series. + // The response is empty if all time series in the request were written. + // If any time series could not be written, a corresponding failure message is + // included in the error response. 
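// Illustrative sketch, not part of the generated code: the server interface
// below mirrors the client one method for method, which makes in-process
// fakes easy to stand up in tests (the kind of harness the exporter tests
// added by this patch could use). fakeMetricService is hypothetical;
// embedding the interface leaves the remaining methods nil, so they panic if
// called. Names are unqualified because this sketch lives in this package:
//
//	type fakeMetricService struct {
//		MetricServiceServer // embedded interface, nil for unimplemented methods
//	}
//
//	func (fakeMetricService) CreateTimeSeries(ctx context.Context, req *CreateTimeSeriesRequest) (*empty.Empty, error) {
//		return &empty.Empty{}, nil // accept every write
//	}
//
//	s := grpc.NewServer()
//	RegisterMetricServiceServer(s, fakeMetricService{})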
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error) +} + +func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) { + s.RegisterService(&_MetricService_serviceDesc, srv) +} + +func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMonitoredResourceDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMonitoredResourceDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } 
+ if interceptor == nil { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMetricDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).ListTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTimeSeriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.MetricService", + HandlerType: (*MetricServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListMonitoredResourceDescriptors", + Handler: _MetricService_ListMonitoredResourceDescriptors_Handler, + }, + { + MethodName: "GetMonitoredResourceDescriptor", + Handler: _MetricService_GetMonitoredResourceDescriptor_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _MetricService_ListMetricDescriptors_Handler, + }, + { + MethodName: "GetMetricDescriptor", + Handler: _MetricService_GetMetricDescriptor_Handler, + }, + { + MethodName: "CreateMetricDescriptor", + Handler: _MetricService_CreateMetricDescriptor_Handler, + }, + { + MethodName: "DeleteMetricDescriptor", + Handler: _MetricService_DeleteMetricDescriptor_Handler, + }, + { + MethodName: "ListTimeSeries", + Handler: _MetricService_ListTimeSeries_Handler, + }, + { + 
MethodName: "CreateTimeSeries", + Handler: _MetricService_CreateTimeSeries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/metric_service.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go new file mode 100644 index 000000000..a4dcb16c1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/mutation_record.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Describes a change made to a configuration. +type MutationRecord struct { + // When the change occurred. + MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"` + // The email address of the user making the change. + MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MutationRecord) Reset() { *m = MutationRecord{} } +func (m *MutationRecord) String() string { return proto.CompactTextString(m) } +func (*MutationRecord) ProtoMessage() {} +func (*MutationRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_83c24e690bdb9101, []int{0} +} + +func (m *MutationRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MutationRecord.Unmarshal(m, b) +} +func (m *MutationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MutationRecord.Marshal(b, m, deterministic) +} +func (m *MutationRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutationRecord.Merge(m, src) +} +func (m *MutationRecord) XXX_Size() int { + return xxx_messageInfo_MutationRecord.Size(m) +} +func (m *MutationRecord) XXX_DiscardUnknown() { + xxx_messageInfo_MutationRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MutationRecord proto.InternalMessageInfo + +func (m *MutationRecord) GetMutateTime() *timestamp.Timestamp { + if m != nil { + return m.MutateTime + } + return nil +} + +func (m *MutationRecord) GetMutatedBy() string { + if m != nil { + return m.MutatedBy + } + return "" +} + +func init() { + proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor_83c24e690bdb9101) +} + +var fileDescriptor_83c24e690bdb9101 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 
0x2f, 0x4a, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33, + 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c, + 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c, + 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, + 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83, + 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, + 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c, + 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4, + 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4, + 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87, + 0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32, + 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go new file mode 100644 index 000000000..6147a91d7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go @@ -0,0 +1,374 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + _ "google.golang.org/genproto/googleapis/api/annotations" + label "google.golang.org/genproto/googleapis/api/label" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Indicates whether the channel has been verified or not. It is illegal +// to specify this field in a +// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel] +// or an +// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel] +// operation. +type NotificationChannel_VerificationStatus int32 + +const ( + // Sentinel value used to indicate that the state is unknown, omitted, or + // is not applicable (as in the case of channels that neither support + // nor require verification in order to function). + NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0 + // The channel has yet to be verified and requires verification to function. + // Note that this state also applies to the case where the verification + // process has been initiated by sending a verification code but where + // the verification code has not been submitted to complete the process. 
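// Illustrative sketch, not part of the generated code: branching on this
// enum. ch is an assumed *NotificationChannel obtained from the
// NotificationChannelService client, and VerificationStatus is the channel
// field this enum annotates, declared further down this file:
//
//	switch ch.GetVerificationStatus() {
//	case NotificationChannel_VERIFIED:
//		// delivery has been proven to work
//	case NotificationChannel_UNVERIFIED:
//		// a verification code must be sent and submitted first
//	default:
//		// unspecified: the channel type may not support verification
//	}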
+ NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1 + // It has been proven that notifications can be received on this + // notification channel and that someone on the project has access + // to messages that are delivered to that channel. + NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2 +) + +var NotificationChannel_VerificationStatus_name = map[int32]string{ + 0: "VERIFICATION_STATUS_UNSPECIFIED", + 1: "UNVERIFIED", + 2: "VERIFIED", +} + +var NotificationChannel_VerificationStatus_value = map[string]int32{ + "VERIFICATION_STATUS_UNSPECIFIED": 0, + "UNVERIFIED": 1, + "VERIFIED": 2, +} + +func (x NotificationChannel_VerificationStatus) String() string { + return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x)) +} + +func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{1, 0} +} + +// A description of a notification channel. The descriptor includes +// the properties of the channel and the set of labels or fields that +// must be specified to configure channels of a given type. +type NotificationChannelDescriptor struct { + // The full REST resource name for this descriptor. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE] + // + // In the above, `[TYPE]` is the value of the `type` field. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // The type of notification channel, such as "email", "sms", etc. + // Notification channel types are globally unique. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // A human-readable name for the notification channel type. This + // form of the name is suitable for a user interface. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // A human-readable description of the notification channel + // type. The description may include a description of the properties + // of the channel and pointers to external documentation. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The set of labels that must be defined to identify a particular + // channel of the corresponding type. Each label includes a + // description for how that field should be populated. + Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"` + // The tiers that support this notification channel; the project service tier + // must be one of the supported_tiers. + SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` // Deprecated: Do not use. 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} } +func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) } +func (*NotificationChannelDescriptor) ProtoMessage() {} +func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{0} +} + +func (m *NotificationChannelDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannelDescriptor.Unmarshal(m, b) +} +func (m *NotificationChannelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannelDescriptor.Marshal(b, m, deterministic) +} +func (m *NotificationChannelDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannelDescriptor.Merge(m, src) +} +func (m *NotificationChannelDescriptor) XXX_Size() int { + return xxx_messageInfo_NotificationChannelDescriptor.Size(m) +} +func (m *NotificationChannelDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannelDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannelDescriptor proto.InternalMessageInfo + +func (m *NotificationChannelDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannelDescriptor) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannelDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor { + if m != nil { + return m.Labels + } + return nil +} + +// Deprecated: Do not use. +func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier { + if m != nil { + return m.SupportedTiers + } + return nil +} + +// A `NotificationChannel` is a medium through which an alert is +// delivered when a policy violation is detected. Examples of channels +// include email, SMS, and third-party messaging applications. Fields +// containing sensitive information like authentication tokens or +// contact info are only partially populated on retrieval. +type NotificationChannel struct { + // The type of the notification channel. This field matches the + // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The full REST resource name for this channel. The syntax is: + // + // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] + // + // The `[CHANNEL_ID]` is automatically assigned by the server on creation. + Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` + // An optional human-readable name for this notification channel. It is + // recommended that you specify a non-empty and unique name in order to + // make it easier to identify the channels in your project, though this is + // not enforced. The display name is limited to 512 Unicode characters. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // An optional human-readable description of this notification channel. 
This
+	// description may provide additional details, beyond the display
+	// name, for the channel. This may not exceed 1024 Unicode characters.
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+	// Configuration fields that define the channel and its behavior. The
+	// permissible and required labels are specified in the
+	// [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the
+	// `NotificationChannelDescriptor` corresponding to the `type` field.
+	Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// User-supplied key/value data that does not need to conform to
+	// the corresponding `NotificationChannelDescriptor`'s schema, unlike
+	// the `labels` field. This field is intended to be used for organizing
+	// and identifying the `NotificationChannel` objects.
+	//
+	// The field can contain up to 64 entries. Each key and value is limited to
+	// 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+	// values can contain only lowercase letters, numerals, underscores, and
+	// dashes. Keys must begin with a letter.
+	UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Indicates whether this channel has been verified or not. On a
+	// [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+	// or
+	// [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+	// operation, this field is expected to be populated.
+	//
+	// If the value is `UNVERIFIED`, then it indicates that the channel is
+	// non-functioning (it both requires verification and lacks verification);
+	// otherwise, it is assumed that the channel works.
+	//
+	// If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
+	// the channel is of a type that does not require verification or that
+	// this specific channel has been exempted from verification because it was
+	// created prior to verification being required for channels of this type.
+	//
+	// This field cannot be modified using a standard
+	// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+	// operation. To change the value of this field, you must call
+	// [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
+	VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
+	// Whether notifications are forwarded to the described channel. This makes
+	// it possible to disable delivery of notifications to a particular channel
+	// without removing the channel from all alerting policies that reference
+	// the channel. This is a more convenient approach when the change is
+	// temporary and you want to receive notifications from the same set
+	// of alerting policies on the channel at some point in the future.
+ Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotificationChannel) Reset() { *m = NotificationChannel{} } +func (m *NotificationChannel) String() string { return proto.CompactTextString(m) } +func (*NotificationChannel) ProtoMessage() {} +func (*NotificationChannel) Descriptor() ([]byte, []int) { + return fileDescriptor_4399f1e4bc1a75ef, []int{1} +} + +func (m *NotificationChannel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotificationChannel.Unmarshal(m, b) +} +func (m *NotificationChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotificationChannel.Marshal(b, m, deterministic) +} +func (m *NotificationChannel) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotificationChannel.Merge(m, src) +} +func (m *NotificationChannel) XXX_Size() int { + return xxx_messageInfo_NotificationChannel.Size(m) +} +func (m *NotificationChannel) XXX_DiscardUnknown() { + xxx_messageInfo_NotificationChannel.DiscardUnknown(m) +} + +var xxx_messageInfo_NotificationChannel proto.InternalMessageInfo + +func (m *NotificationChannel) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *NotificationChannel) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NotificationChannel) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *NotificationChannel) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *NotificationChannel) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *NotificationChannel) GetUserLabels() map[string]string { + if m != nil { + return m.UserLabels + } + return nil +} + +func (m *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus { + if m != nil { + return m.VerificationStatus + } + return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED +} + +func (m *NotificationChannel) GetEnabled() *wrappers.BoolValue { + if m != nil { + return m.Enabled + } + return nil +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value) + proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor") + proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.LabelsEntry") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.UserLabelsEntry") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor_4399f1e4bc1a75ef) +} + +var fileDescriptor_4399f1e4bc1a75ef = []byte{ + // 602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x6d, 0x6b, 0xdb, 0x3c, + 0x14, 0x7d, 0x9c, 0x34, 0x7d, 0x5a, 0xb9, 0xa4, 0x9d, 0x5a, 0x86, 0xf1, 0xde, 0xd2, 0xee, 0xc3, + 0xf2, 0xc9, 0x86, 0x64, 0x83, 0x75, 0x6f, 0xd0, 0xa4, 0xe9, 0x08, 0xac, 0x59, 0xc9, 0xdb, 0xa0, + 0x14, 0x82, 0x92, 0xa8, 0x9e, 0x98, 0x2d, 0x19, 0x49, 0xf6, 0xc8, 0xcf, 0xd8, 0x8f, 0xd8, 0x87, + 0xed, 0xa7, 0xec, 0x57, 0x0d, 0xcb, 0x8a, 0xed, 0xb5, 0x86, 0x75, 0xdf, 
0x74, 0xcf, 0x3d, 0xe7, + 0xdc, 0x7b, 0x4f, 0x4c, 0xc0, 0x33, 0x8f, 0x31, 0xcf, 0xc7, 0x6e, 0xc0, 0x28, 0x91, 0x8c, 0x13, + 0xea, 0xb9, 0x71, 0xdb, 0xa5, 0x4c, 0x92, 0x6b, 0xb2, 0x40, 0x92, 0x30, 0xea, 0x84, 0x9c, 0x49, + 0x06, 0x0f, 0x52, 0xa2, 0x93, 0x13, 0x9d, 0xb8, 0x6d, 0x3f, 0xd4, 0x72, 0x14, 0x12, 0x17, 0x51, + 0xca, 0xa4, 0x92, 0x88, 0x54, 0x63, 0xdf, 0x2f, 0x74, 0x7d, 0x34, 0xc7, 0xbe, 0xc6, 0x0f, 0x4b, + 0x87, 0x2e, 0x58, 0x10, 0xac, 0xc7, 0xd9, 0x8f, 0x35, 0x45, 0x55, 0xf3, 0xe8, 0xda, 0xfd, 0xca, + 0x51, 0x18, 0x62, 0xae, 0xad, 0x8f, 0xbe, 0x55, 0xc0, 0xa3, 0x41, 0x61, 0xcb, 0xee, 0x67, 0x44, + 0x29, 0xf6, 0x4f, 0xb1, 0x58, 0x70, 0x12, 0x4a, 0xc6, 0x21, 0x04, 0x1b, 0x14, 0x05, 0xd8, 0xda, + 0x6c, 0x18, 0xcd, 0xed, 0xa1, 0x7a, 0x27, 0x98, 0x5c, 0x85, 0xd8, 0x32, 0x52, 0x2c, 0x79, 0xc3, + 0x43, 0xb0, 0xb3, 0x24, 0x22, 0xf4, 0xd1, 0x6a, 0xa6, 0xf8, 0x15, 0xd5, 0x33, 0x35, 0x36, 0x48, + 0x64, 0x0d, 0x60, 0x2e, 0xb5, 0x31, 0x61, 0xd4, 0xaa, 0x6a, 0x46, 0x0e, 0xc1, 0x36, 0xd8, 0x54, + 0x07, 0x0a, 0x6b, 0xa3, 0x51, 0x6d, 0x9a, 0xad, 0x07, 0x8e, 0x8e, 0x0b, 0x85, 0xc4, 0xf9, 0x90, + 0x74, 0xf2, 0xcd, 0x86, 0x9a, 0x0a, 0x07, 0x60, 0x57, 0x44, 0x61, 0xc8, 0xb8, 0xc4, 0xcb, 0x99, + 0x24, 0x98, 0x0b, 0xab, 0xd6, 0xa8, 0x36, 0xeb, 0xad, 0x43, 0xa7, 0x2c, 0x6c, 0x67, 0x84, 0x79, + 0x4c, 0x16, 0x78, 0x4c, 0x30, 0xef, 0x54, 0x2c, 0x63, 0x58, 0xcf, 0xd4, 0x09, 0x24, 0x8e, 0xbe, + 0xd7, 0xc0, 0x7e, 0x49, 0x26, 0xa5, 0x57, 0x97, 0xa5, 0x73, 0x33, 0x89, 0xea, 0x5f, 0x93, 0xd8, + 0xb8, 0x9d, 0xc4, 0x79, 0x96, 0x44, 0x4d, 0x25, 0xf1, 0xa2, 0xfc, 0x96, 0x92, 0x3d, 0xd3, 0x9c, + 0x44, 0x8f, 0x4a, 0xbe, 0xca, 0x32, 0xba, 0x04, 0x66, 0x24, 0x30, 0x9f, 0x69, 0xcf, 0x2d, 0xe5, + 0x79, 0x7c, 0x77, 0xcf, 0x89, 0xc0, 0xbc, 0xe8, 0x0b, 0xa2, 0x0c, 0x80, 0x01, 0xd8, 0x8f, 0x31, + 0xcf, 0x24, 0x33, 0x21, 0x91, 0x8c, 0x84, 0xb5, 0xdd, 0x30, 0x9a, 0xf5, 0xd6, 0x9b, 0xbb, 0xcf, + 0x98, 0x16, 0x4c, 0x46, 0xca, 0x63, 0x08, 0xe3, 0x5b, 0x18, 0x7c, 0x0e, 0xfe, 0xc7, 0x14, 0xcd, + 0x7d, 0xbc, 0xb4, 0xcc, 0x86, 0xd1, 0x34, 0x5b, 0xf6, 0x7a, 0xc4, 0xfa, 0x23, 0x77, 0x3a, 0x8c, + 0xf9, 0x53, 0xe4, 0x47, 0x78, 0xb8, 0xa6, 0xda, 0xc7, 0xc0, 0x2c, 0xec, 0x0f, 0xf7, 0x40, 0xf5, + 0x0b, 0x5e, 0xe9, 0x9f, 0x32, 0x79, 0xc2, 0x03, 0x50, 0x8b, 0x13, 0x89, 0xfe, 0x70, 0xd3, 0xe2, + 0x55, 0xe5, 0xa5, 0x61, 0xbf, 0x05, 0xbb, 0x37, 0xce, 0xff, 0x17, 0xf9, 0xd1, 0x27, 0x00, 0x6f, + 0x5f, 0x06, 0x9f, 0x82, 0x27, 0xd3, 0xde, 0xb0, 0x7f, 0xd6, 0xef, 0x9e, 0x8c, 0xfb, 0x1f, 0x07, + 0xb3, 0xd1, 0xf8, 0x64, 0x3c, 0x19, 0xcd, 0x26, 0x83, 0xd1, 0x45, 0xaf, 0xdb, 0x3f, 0xeb, 0xf7, + 0x4e, 0xf7, 0xfe, 0x83, 0x75, 0x00, 0x26, 0x83, 0x94, 0xd6, 0x3b, 0xdd, 0x33, 0xe0, 0x0e, 0xd8, + 0xca, 0xaa, 0x4a, 0xe7, 0x87, 0x01, 0xac, 0x05, 0x0b, 0x4a, 0x03, 0xee, 0xdc, 0x2b, 0x26, 0x7c, + 0x91, 0x04, 0x73, 0x61, 0x5c, 0xbe, 0xd3, 0x54, 0x8f, 0xf9, 0x88, 0x7a, 0x0e, 0xe3, 0x9e, 0xeb, + 0x61, 0xaa, 0x62, 0x73, 0xd3, 0x16, 0x0a, 0x89, 0xf8, 0xf3, 0xff, 0xe4, 0x75, 0x5e, 0xfd, 0xac, + 0xd8, 0xef, 0x53, 0x83, 0xae, 0xcf, 0xa2, 0xa5, 0x73, 0x9e, 0x4f, 0x9c, 0xb6, 0x7f, 0xad, 0x9b, + 0x57, 0xaa, 0x79, 0x95, 0x37, 0xaf, 0xa6, 0xed, 0xf9, 0xa6, 0x1a, 0xd2, 0xfe, 0x1d, 0x00, 0x00, + 0xff, 0xff, 0xf7, 0x1b, 0x09, 0x21, 0x28, 0x05, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go new file mode 100644 index 000000000..f3e8d1aaa --- /dev/null +++ 
b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go @@ -0,0 +1,1319 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/notification_service.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + context "golang.org/x/net/context" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `ListNotificationChannelDescriptors` request. +type ListNotificationChannelDescriptorsRequest struct { + // The REST resource name of the parent from which to retrieve + // the notification channel descriptors. The expected syntax is: + // + // projects/[PROJECT_ID] + // + // Note that this names the parent container in which to look for the + // descriptors; to retrieve a single descriptor by name, use the + // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor] + // operation, instead. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
+	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) Reset() {
+	*m = ListNotificationChannelDescriptorsRequest{}
+}
+func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e2bcd7194b305fe, []int{0}
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Merge(m, src)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Size() int {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Size(m)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListNotificationChannelDescriptorsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelDescriptorsRequest proto.InternalMessageInfo
+
+func (m *ListNotificationChannelDescriptorsRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
+	if m != nil {
+		return m.PageSize
+	}
+	return 0
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
+	if m != nil {
+		return m.PageToken
+	}
+	return ""
+}
+
+// The `ListNotificationChannelDescriptors` response.
+type ListNotificationChannelDescriptorsResponse struct {
+	// The notification channel descriptors supported for the specified
+	// project, optionally filtered.
+	ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"`
+	// If not empty, indicates that there may be more results that match
+	// the request. Use the value in the `page_token` field in a
+	// subsequent request to fetch the next set of results. If empty,
+	// all results have been returned.
+	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) Reset() {
+	*m = ListNotificationChannelDescriptorsResponse{}
+}
+func (m *ListNotificationChannelDescriptorsResponse) String() string {
+	return proto.CompactTextString(m)
+}
+func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e2bcd7194b305fe, []int{1}
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Merge(m, src)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Size() int {
+	return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Size(m)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListNotificationChannelDescriptorsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelDescriptorsResponse proto.InternalMessageInfo
+
+func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
+	if m != nil {
+		return m.ChannelDescriptors
+	}
+	return nil
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
+	if m != nil {
+		return m.NextPageToken
+	}
+	return ""
+}
+
+// The `GetNotificationChannelDescriptor` request.
+type GetNotificationChannelDescriptorRequest struct {
+	// The channel type for which to execute the request. The format is
+	// `projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelDescriptorRequest) Reset() { + *m = GetNotificationChannelDescriptorRequest{} +} +func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {} +func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{2} +} + +func (m *GetNotificationChannelDescriptorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.Merge(m, src) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Size(m) +} +func (m *GetNotificationChannelDescriptorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelDescriptorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelDescriptorRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelDescriptorRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `CreateNotificationChannel` request. +type CreateNotificationChannelRequest struct { + // The project on which to execute the request. The format is: + // + // projects/[PROJECT_ID] + // + // Note that this names the container into which the channel will be + // written. This does not name the newly created channel. The resulting + // channel's name will have a normalized version of this field as a prefix, + // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // The definition of the `NotificationChannel` to create. 
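+	//
+	// A minimal sketch of such a definition (illustrative only; the "email"
+	// type and its "email_address" label key come from the corresponding
+	// channel descriptor and are assumptions here, not defined in this file):
+	//
+	//	&NotificationChannel{
+	//		Type:        "email",
+	//		DisplayName: "on-call",
+	//		Labels:      map[string]string{"email_address": "oncall@example.com"},
+	//	}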
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} } +func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateNotificationChannelRequest) ProtoMessage() {} +func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{3} +} + +func (m *CreateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *CreateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *CreateNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateNotificationChannelRequest.Merge(m, src) +} +func (m *CreateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_CreateNotificationChannelRequest.Size(m) +} +func (m *CreateNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateNotificationChannelRequest proto.InternalMessageInfo + +func (m *CreateNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `ListNotificationChannels` request. +type ListNotificationChannelsRequest struct { + // The project on which to execute the request. The format is + // `projects/[PROJECT_ID]`. That is, this names the container + // in which to look for the notification channels; it does not name a + // specific channel. To query a specific channel by REST resource name, use + // the + // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // If provided, this field specifies the criteria that must be met by + // notification channels to be included in the response. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"` + // A comma-separated list of fields by which to sort the result. Supports + // the same set of fields as in `filter`. Entries can be prefixed with + // a minus sign to sort in descending rather than ascending order. + // + // For more details, see [sorting and + // filtering](/monitoring/api/v3/sorting-and-filtering). + OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // The maximum number of results to return in a single response. If + // not set to a positive number, a reasonable value will be chosen by the + // service. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` must contain a value returned as the + // `next_page_token` in a previous response to request the next set + // of results. 
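+	//
+	// A minimal paging sketch (illustrative; `client` and `ctx` are assumed
+	// to be an existing NotificationChannelServiceClient and context.Context):
+	//
+	//	req := &ListNotificationChannelsRequest{Name: "projects/my-project", PageSize: 100}
+	//	for {
+	//		resp, err := client.ListNotificationChannels(ctx, req)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		handle(resp.NotificationChannels) // hypothetical consumer
+	//		if resp.NextPageToken == "" {
+	//			break
+	//		}
+	//		req.PageToken = resp.NextPageToken
+	//	}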
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} } +func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsRequest) ProtoMessage() {} +func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{4} +} + +func (m *ListNotificationChannelsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsRequest.Unmarshal(m, b) +} +func (m *ListNotificationChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsRequest.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsRequest.Merge(m, src) +} +func (m *ListNotificationChannelsRequest) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsRequest.Size(m) +} +func (m *ListNotificationChannelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsRequest proto.InternalMessageInfo + +func (m *ListNotificationChannelsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +func (m *ListNotificationChannelsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListNotificationChannelsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The `ListNotificationChannels` response. +type ListNotificationChannelsResponse struct { + // The notification channels defined for the specified project. + NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"` + // If not empty, indicates that there may be more results that match + // the request. Use the value in the `page_token` field in a + // subsequent request to fetch the next set of results. If empty, + // all results have been returned. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} } +func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*ListNotificationChannelsResponse) ProtoMessage() {} +func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{5} +} + +func (m *ListNotificationChannelsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListNotificationChannelsResponse.Unmarshal(m, b) +} +func (m *ListNotificationChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListNotificationChannelsResponse.Marshal(b, m, deterministic) +} +func (m *ListNotificationChannelsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListNotificationChannelsResponse.Merge(m, src) +} +func (m *ListNotificationChannelsResponse) XXX_Size() int { + return xxx_messageInfo_ListNotificationChannelsResponse.Size(m) +} +func (m *ListNotificationChannelsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListNotificationChannelsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListNotificationChannelsResponse proto.InternalMessageInfo + +func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel { + if m != nil { + return m.NotificationChannels + } + return nil +} + +func (m *ListNotificationChannelsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The `GetNotificationChannel` request. +type GetNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} } +func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*GetNotificationChannelRequest) ProtoMessage() {} +func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{6} +} + +func (m *GetNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelRequest.Unmarshal(m, b) +} +func (m *GetNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelRequest.Merge(m, src) +} +func (m *GetNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelRequest.Size(m) +} +func (m *GetNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelRequest proto.InternalMessageInfo + +func (m *GetNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `UpdateNotificationChannel` request. 
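+//
+// A minimal sketch of a masked update (illustrative; the mask path and
+// channel values are assumptions for the example):
+//
+//	req := &UpdateNotificationChannelRequest{
+//		UpdateMask: &field_mask.FieldMask{Paths: []string{"display_name"}},
+//		NotificationChannel: &NotificationChannel{
+//			Name:        channelName,
+//			DisplayName: "secondary on-call",
+//		},
+//	}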
+type UpdateNotificationChannelRequest struct { + // The fields to update. + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // A description of the changes to be applied to the specified + // notification channel. The description must provide a definition for + // fields to be updated; the names of these fields should also be + // included in the `update_mask`. + NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} } +func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateNotificationChannelRequest) ProtoMessage() {} +func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{7} +} + +func (m *UpdateNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateNotificationChannelRequest.Unmarshal(m, b) +} +func (m *UpdateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *UpdateNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateNotificationChannelRequest.Merge(m, src) +} +func (m *UpdateNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_UpdateNotificationChannelRequest.Size(m) +} +func (m *UpdateNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateNotificationChannelRequest proto.InternalMessageInfo + +func (m *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel { + if m != nil { + return m.NotificationChannel + } + return nil +} + +// The `DeleteNotificationChannel` request. +type DeleteNotificationChannelRequest struct { + // The channel for which to execute the request. The format is + // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // If true, the notification channel will be deleted regardless of its + // use in alert policies (the policies will be updated to remove the + // channel). If false, channels that are still referenced by an existing + // alerting policy will fail to be deleted in a delete operation. 
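+	//
+	// e.g. (illustrative; `client`, `ctx`, and `channelName` are assumed):
+	//
+	//	_, err := client.DeleteNotificationChannel(ctx,
+	//		&DeleteNotificationChannelRequest{Name: channelName, Force: true})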
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} } +func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteNotificationChannelRequest) ProtoMessage() {} +func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{8} +} + +func (m *DeleteNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteNotificationChannelRequest.Unmarshal(m, b) +} +func (m *DeleteNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *DeleteNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteNotificationChannelRequest.Merge(m, src) +} +func (m *DeleteNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_DeleteNotificationChannelRequest.Size(m) +} +func (m *DeleteNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteNotificationChannelRequest proto.InternalMessageInfo + +func (m *DeleteNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteNotificationChannelRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// The `SendNotificationChannelVerificationCode` request. +type SendNotificationChannelVerificationCodeRequest struct { + // The notification channel to which to send a verification code. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SendNotificationChannelVerificationCodeRequest) Reset() { + *m = SendNotificationChannelVerificationCodeRequest{} +} +func (m *SendNotificationChannelVerificationCodeRequest) String() string { + return proto.CompactTextString(m) +} +func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {} +func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{9} +} + +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Unmarshal(m, b) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Merge(m, src) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_Size() int { + return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Size(m) +} +func (m *SendNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SendNotificationChannelVerificationCodeRequest proto.InternalMessageInfo + +func (m *SendNotificationChannelVerificationCodeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The `GetNotificationChannelVerificationCode` request. +type GetNotificationChannelVerificationCodeRequest struct { + // The notification channel for which a verification code is to be generated + // and retrieved. This must name a channel that is already verified; if + // the specified channel is not verified, the request will fail. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The desired expiration time. If specified, the API will guarantee that + // the returned code will not be valid after the specified timestamp; + // however, the API cannot guarantee that the returned code will be + // valid for at least as long as the requested time (the API puts an upper + // bound on the amount of time for which a code may be valid). If omitted, + // a default expiration will be used, which may be less than the max + // permissible expiration (so specifying an expiration may extend the + // code's lifetime over omitting an expiration, even though the API does + // impose an upper limit on the maximum expiration that is permitted). 
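+	//
+	// e.g. (illustrative) requesting a ten-minute expiry:
+	//
+	//	expire := time.Now().Add(10 * time.Minute)
+	//	req.ExpireTime = &timestamp.Timestamp{Seconds: expire.Unix()}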
+	ExpireTime           *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) Reset() {
+	*m = GetNotificationChannelVerificationCodeRequest{}
+}
+func (m *GetNotificationChannelVerificationCodeRequest) String() string {
+	return proto.CompactTextString(m)
+}
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e2bcd7194b305fe, []int{10}
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Unmarshal(m, b)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Merge(m, src)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Size() int {
+	return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Size(m)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNotificationChannelVerificationCodeRequest proto.InternalMessageInfo
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp {
+	if m != nil {
+		return m.ExpireTime
+	}
+	return nil
+}
+
+// The `GetNotificationChannelVerificationCode` response.
+type GetNotificationChannelVerificationCodeResponse struct {
+	// The verification code, which may be used to verify other channels
+	// that have an equivalent identity (i.e. other channels of the same
+	// type with the same fingerprint such as other email channels with
+	// the same email address or other sms channels with the same number).
+	Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
+	// The expiration time associated with the code that was returned. If
+	// an expiration was provided in the request, this is the minimum of the
+	// requested expiration in the request and the max permitted expiration.
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNotificationChannelVerificationCodeResponse) Reset() { + *m = GetNotificationChannelVerificationCodeResponse{} +} +func (m *GetNotificationChannelVerificationCodeResponse) String() string { + return proto.CompactTextString(m) +} +func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {} +func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{11} +} + +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Unmarshal(m, b) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Marshal(b, m, deterministic) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Merge(m, src) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_Size() int { + return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Size(m) +} +func (m *GetNotificationChannelVerificationCodeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNotificationChannelVerificationCodeResponse proto.InternalMessageInfo + +func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp { + if m != nil { + return m.ExpireTime + } + return nil +} + +// The `VerifyNotificationChannel` request. +type VerifyNotificationChannelRequest struct { + // The notification channel to verify. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The verification code that was delivered to the channel as + // a result of invoking the `SendNotificationChannelVerificationCode` API + // method or that was retrieved from a verified channel via + // `GetNotificationChannelVerificationCode`. For example, one might have + // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only + // guaranteed that the code is valid UTF-8; one should not + // make any assumptions regarding the structure or format of the code). 
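+	//
+	// A minimal end-to-end sketch (illustrative; `client`, `ctx`,
+	// `channelName`, and `userCode` are assumptions):
+	//
+	//	_, err := client.SendNotificationChannelVerificationCode(ctx,
+	//		&SendNotificationChannelVerificationCodeRequest{Name: channelName})
+	//	// ...the user reads the code delivered to the channel...
+	//	ch, err := client.VerifyNotificationChannel(ctx,
+	//		&VerifyNotificationChannelRequest{Name: channelName, Code: userCode})
+	//	// on success, ch.GetVerificationStatus() reports VERIFIED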
+ Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} } +func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) } +func (*VerifyNotificationChannelRequest) ProtoMessage() {} +func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7e2bcd7194b305fe, []int{12} +} + +func (m *VerifyNotificationChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyNotificationChannelRequest.Unmarshal(m, b) +} +func (m *VerifyNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyNotificationChannelRequest.Marshal(b, m, deterministic) +} +func (m *VerifyNotificationChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyNotificationChannelRequest.Merge(m, src) +} +func (m *VerifyNotificationChannelRequest) XXX_Size() int { + return xxx_messageInfo_VerifyNotificationChannelRequest.Size(m) +} +func (m *VerifyNotificationChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyNotificationChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyNotificationChannelRequest proto.InternalMessageInfo + +func (m *VerifyNotificationChannelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *VerifyNotificationChannelRequest) GetCode() string { + if m != nil { + return m.Code + } + return "" +} + +func init() { + proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest") + proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse") + proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest") + proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest") + proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest") + proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse") + proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest") + proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest") + proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest") + proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest") + proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse") + proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor_7e2bcd7194b305fe) +} + +var fileDescriptor_7e2bcd7194b305fe = []byte{ + // 1011 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x41, 0x6f, 0xdc, 0x44, + 0x14, 0xd6, 0xec, 0x26, 0x69, 0xfa, 0x22, 0x04, 0x9a, 0x86, 0xc8, 0xbb, 0xa5, 0xaa, 0xe5, 0x43, + 0x93, 0xae, 0x8a, 0x2d, 0xad, 0x4b, 0x84, 0x52, 0x52, 0xda, 0x64, 0xdb, 0x22, 0x48, 0x51, 0xb4, + 0x29, 0x91, 0x40, 0x11, 0x2b, 0xc7, 0x7e, 0x6b, 0x4c, 0x76, 0x67, 0x8c, 0x3d, 0x89, 0x9a, 0x56, + 0x95, 0x0a, 0x7f, 0x01, 0xfe, 0x00, 0x12, 0xa7, 0x1e, 0x10, 0x67, 0x50, 0x39, 0x23, 0xae, 0x08, + 0xae, 0x5c, 0xe0, 0x7f, 0x20, 0x8f, 0xbd, 0xd9, 0xcd, 0x66, 0xbc, 0x6b, 0x37, 0xdc, 0x3c, 0xf3, + 0xde, 0xbc, 0xf7, 0xbd, 0xef, 0x7d, 0x9e, 0x67, 0x83, 0xe5, 0x73, 0xee, 0xf7, 0xd0, 0xea, 0x73, + 0x16, 0x08, 0x1e, 0x05, 0xcc, 0xb7, 0x8e, 0x6c, 0x8b, 0x71, 0x11, 0x74, 0x03, 0xd7, 0x11, 0x01, + 0x67, 0x9d, 0x18, 0xa3, 0xa3, 0xc0, 0x45, 0x33, 0x8c, 0xb8, 0xe0, 0x74, 0x31, 0x3d, 0x60, 0x0e, + 0x0f, 0x98, 0x47, 0x76, 0xfd, 0xad, 0x2c, 0x8c, 0x13, 0x06, 0x96, 0xc3, 0x18, 0x17, 0xf2, 0x68, + 0x9c, 0x9e, 0xa9, 0x2f, 0x4f, 0x4d, 0x92, 0x39, 0x5e, 0xce, 0x1c, 0xe5, 0x6a, 0xff, 0xb0, 0x6b, + 0x61, 0x3f, 0x14, 0xc7, 0x99, 0x51, 0x1f, 0x37, 0x76, 0x03, 0xec, 0x79, 0x9d, 0xbe, 0x13, 0x1f, + 0x64, 0x1e, 0x57, 0xc7, 0x3d, 0x44, 0xd0, 0xc7, 0x58, 0x38, 0xfd, 0x30, 0x75, 0x30, 0x9e, 0xc2, + 0xf5, 0xad, 0x20, 0x16, 0x1f, 0x8f, 0x64, 0xde, 0xfc, 0xc2, 0x61, 0x0c, 0x7b, 0x2d, 0x8c, 0xdd, + 0x28, 0x08, 0x05, 0x8f, 0xe2, 0x36, 0x7e, 0x75, 0x88, 0xb1, 0xa0, 0x14, 0x66, 0x98, 0xd3, 0x47, + 0x6d, 0x46, 0x27, 0x2b, 0x17, 0xdb, 0xf2, 0x99, 0x5e, 0x86, 0x8b, 0xa1, 0xe3, 0x63, 0x27, 0x0e, + 0x9e, 0xa0, 0x56, 0xd1, 0xc9, 0xca, 0x6c, 0x7b, 0x3e, 0xd9, 0xd8, 0x09, 0x9e, 0x20, 0xbd, 0x02, + 0x20, 0x8d, 0x82, 0x1f, 0x20, 0xd3, 0xaa, 0xf2, 0x98, 0x74, 0x7f, 0x94, 0x6c, 0x18, 0x3f, 0x13, + 0x68, 0x14, 0xc9, 0x1e, 0x87, 0x9c, 0xc5, 0x48, 0x3d, 0xb8, 0xe4, 0xa6, 0xd6, 0x8e, 0x37, 0x34, + 0x6b, 0x44, 0xaf, 0xae, 0x2c, 0x34, 0x6d, 0x53, 0xd5, 0x06, 0x73, 0x62, 0xe8, 0x36, 0x75, 0xcf, + 0x64, 0xa3, 0xd7, 0xe0, 0x75, 0x86, 0x8f, 0x45, 0x67, 0x04, 0x78, 0x45, 0x02, 0x7f, 0x2d, 0xd9, + 0xde, 0x3e, 0x01, 0xbf, 0x0e, 0xcb, 0x0f, 0x70, 0x32, 0xf4, 0x71, 0xde, 0xaa, 0x43, 0xde, 0x8c, + 0xef, 0x08, 0xe8, 0x9b, 0x11, 0x3a, 0x02, 0x15, 0x21, 0x26, 0x1c, 0xa4, 0x7b, 0xb0, 0x78, 0x4a, + 0x8c, 0x59, 0x09, 0x12, 0xe4, 0x42, 0xf3, 0x7a, 0x61, 0x1a, 0xda, 0x97, 0xd8, 0xd9, 0x4d, 0xe3, + 0x07, 0x02, 0x57, 0x73, 0x5a, 0x72, 0x46, 0x06, 0xb3, 0x23, 0xa8, 0x96, 0x60, 0xae, 0x1b, 0xf4, + 0x04, 0x46, 0xda, 0x9c, 0xdc, 0xcd, 0x56, 0xb4, 0x06, 0xf3, 0x3c, 0xf2, 0x30, 0xea, 0xec, 0x1f, + 0x6b, 0x17, 0xa4, 0xe5, 0x82, 0x5c, 0x6f, 0x1c, 0x9f, 0x56, 0x4e, 0x75, 0xa2, 0x72, 0x66, 0xc6, + 0x95, 0xf3, 0x82, 0x80, 0x9e, 0x0f, 0x33, 0xd3, 0xcb, 0xe7, 0xf0, 0xa6, 0x8a, 0xa9, 0x58, 0xab, + 0x4a, 0xc5, 0x94, 0xa0, 0x6a, 0x51, 0x41, 0x55, 0x71, 0xa5, 0xd8, 0x70, 0x45, 0xad, 0x94, 0x49, + 0xfa, 0x78, 0x49, 0x40, 0xff, 0x24, 0xf4, 0x26, 0xeb, 0xe3, 0x16, 0x2c, 0x1c, 0x4a, 0x1f, 0xf9, + 0xce, 0x67, 0x12, 0xa8, 0x0f, 0xea, 0x1a, 0xbc, 0xf4, 0xe6, 0xfd, 0xe4, 0x5a, 0x78, 0xe8, 0xc4, + 0x07, 0x6d, 0x48, 0xdd, 0x93, 0xe7, 0x5c, 0x21, 0x55, 0xff, 0x17, 0x21, 0x6d, 0x81, 0xde, 0xc2, + 0x1e, 0x96, 0x96, 0xf7, 0x22, 0xcc, 0x76, 0x79, 0xe4, 0xa6, 0xea, 0x9a, 0x6f, 0xa7, 0x0b, 0xa3, + 0x05, 0xe6, 0x0e, 0x32, 0x4f, 0x11, 0x6b, 0x17, 0xa3, 0xe1, 0x16, 0xf7, 0x70, 0x3c, 0x36, 0x19, + 0xe1, 0xf4, 0x39, 0x81, 0xb7, 0xd5, 0x9d, 0x28, 0x11, 0x25, 0x21, 0x1d, 0x1f, 0x87, 0x41, 0x84, + 0x9d, 0xe4, 0x32, 0xcd, 0x25, 0xfd, 0xd1, 0xe0, 0xa6, 0x6d, 0x43, 0xea, 0x9e, 0x6c, 0x18, 
0x5f, + 0x13, 0x30, 0x8b, 0x42, 0xc8, 0x64, 0x4c, 0x61, 0xc6, 0xe5, 0xde, 0x09, 0x86, 0xe4, 0xf9, 0x7c, + 0x18, 0x3e, 0x04, 0x5d, 0x26, 0x3b, 0x2e, 0xd0, 0x9a, 0xd1, 0xc2, 0x07, 0x40, 0x2a, 0x43, 0x20, + 0xcd, 0x5f, 0xde, 0x80, 0xba, 0x22, 0xcc, 0x4e, 0x3a, 0x21, 0xe9, 0xbf, 0x04, 0x8c, 0xe9, 0x37, + 0x3c, 0x7d, 0x5f, 0x2d, 0xb6, 0xc2, 0x93, 0xa9, 0x7e, 0xe7, 0xd5, 0x03, 0xa4, 0x2c, 0x1b, 0xef, + 0x7d, 0xf3, 0xc7, 0x3f, 0xdf, 0x56, 0x56, 0xe9, 0xcd, 0x64, 0x10, 0x3f, 0x4d, 0xea, 0x5d, 0x0f, + 0x23, 0xfe, 0x25, 0xba, 0x22, 0xb6, 0x1a, 0xcf, 0x2c, 0x36, 0xb9, 0x80, 0xbf, 0x08, 0xe8, 0xd3, + 0xa6, 0x01, 0x5d, 0x57, 0x83, 0x2c, 0x38, 0x45, 0xea, 0xaf, 0x32, 0xe1, 0x8c, 0xdb, 0xb2, 0xac, + 0x77, 0xe9, 0xaa, 0xaa, 0xac, 0x29, 0x55, 0x59, 0x8d, 0x67, 0xf4, 0x25, 0x01, 0x2d, 0xef, 0xa2, + 0xa5, 0xef, 0x94, 0x62, 0xfd, 0xa4, 0x59, 0xab, 0x65, 0x8f, 0x65, 0x2d, 0x6a, 0xca, 0x5a, 0x6e, + 0xd0, 0x46, 0xe1, 0x16, 0xc5, 0xf4, 0x47, 0x02, 0x4b, 0x6a, 0x82, 0xa9, 0x5d, 0xa6, 0x1d, 0x03, + 0xec, 0xc5, 0xaf, 0x45, 0xe3, 0xa6, 0x84, 0x6b, 0xd2, 0x1b, 0x45, 0xa9, 0x97, 0x84, 0xff, 0x46, + 0xa0, 0x96, 0xfb, 0x5d, 0x40, 0x73, 0xa8, 0x9b, 0xf6, 0x21, 0x51, 0x06, 0xf6, 0x07, 0x12, 0xf6, + 0x86, 0x51, 0x82, 0xe5, 0x35, 0xe5, 0x20, 0xa1, 0x7f, 0x13, 0xa8, 0xe5, 0x8e, 0xb0, 0xbc, 0x52, + 0xa6, 0xcd, 0xbc, 0x32, 0xa5, 0x74, 0x64, 0x29, 0x9f, 0x36, 0xef, 0xa6, 0xa5, 0x28, 0x30, 0x9a, + 0x05, 0xdb, 0x92, 0x53, 0xe1, 0xf7, 0x04, 0x6a, 0xb9, 0x53, 0x2e, 0xaf, 0xc2, 0x69, 0x63, 0xb1, + 0xbe, 0x74, 0xe6, 0x1e, 0xbf, 0x97, 0x7c, 0xf4, 0x0f, 0x04, 0xd5, 0x28, 0x27, 0xa8, 0x3f, 0x09, + 0x2c, 0x17, 0x9c, 0x9d, 0xb4, 0xa5, 0x46, 0x5c, 0x6e, 0xf4, 0xe6, 0xe2, 0xdf, 0x92, 0xf8, 0xef, + 0x1b, 0x77, 0xcb, 0xe0, 0x5f, 0x8b, 0x91, 0x79, 0xe3, 0x99, 0xd6, 0x48, 0x83, 0x3e, 0xaf, 0xc0, + 0xb5, 0x62, 0x93, 0x94, 0x6e, 0x96, 0x79, 0xd3, 0xf3, 0xaa, 0x6a, 0x9d, 0x2f, 0x48, 0x76, 0x87, + 0x7d, 0x24, 0x39, 0xb8, 0x67, 0xdc, 0x29, 0xc5, 0x81, 0x8f, 0x42, 0x45, 0xc1, 0xaf, 0x04, 0x6a, + 0xb9, 0x93, 0x3c, 0x4f, 0x7e, 0xd3, 0x46, 0x7f, 0x99, 0x17, 0x2c, 0x9b, 0x2e, 0x86, 0x5d, 0xaa, + 0x9a, 0x23, 0x89, 0x60, 0x8d, 0x34, 0x36, 0x7e, 0x22, 0xa0, 0xb9, 0xbc, 0xaf, 0x4c, 0xb8, 0xa1, + 0x8d, 0x66, 0xcc, 0x3e, 0x28, 0xb6, 0x13, 0x45, 0x6d, 0x93, 0xcf, 0x6e, 0x67, 0x27, 0x7c, 0xde, + 0x73, 0x98, 0x6f, 0xf2, 0xc8, 0xb7, 0x7c, 0x64, 0x52, 0x6f, 0xd9, 0xff, 0xbb, 0x13, 0x06, 0xf1, + 0xe9, 0xdf, 0xeb, 0x5b, 0xc3, 0xd5, 0x8b, 0x4a, 0xfd, 0x41, 0x1a, 0x60, 0xb3, 0xc7, 0x0f, 0x3d, + 0xf3, 0xe1, 0x30, 0xf1, 0xae, 0xfd, 0xfb, 0xc0, 0xb8, 0x27, 0x8d, 0x7b, 0x43, 0xe3, 0xde, 0xae, + 0xbd, 0x3f, 0x27, 0x93, 0xd8, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xf3, 0x96, 0xf5, 0x27, + 0x10, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// NotificationChannelServiceClient is the client API for NotificationChannelService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NotificationChannelServiceClient interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. 
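+	//
+	// A minimal call sketch (illustrative; `conn` and `ctx` are assumed to be
+	// an established *grpc.ClientConn and context.Context):
+	//
+	//	client := NewNotificationChannelServiceClient(conn)
+	//	resp, err := client.ListNotificationChannelDescriptors(ctx,
+	//		&ListNotificationChannelDescriptorsRequest{Name: "projects/my-project"})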
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or pagerduty service. + CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. + UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. 
codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. + VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) +} + +type notificationChannelServiceClient struct { + cc *grpc.ClientConn +} + +func NewNotificationChannelServiceClient(cc *grpc.ClientConn) NotificationChannelServiceClient { + return ¬ificationChannelServiceClient{cc} +} + +func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) { + out := new(ListNotificationChannelDescriptorsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) { + out := new(NotificationChannelDescriptor) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) { + out := new(ListNotificationChannelsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) { + out := new(GetNotificationChannelVerificationCodeResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) { + out := new(NotificationChannel) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotificationChannelServiceServer is the server API for NotificationChannelService service. +type NotificationChannelServiceServer interface { + // Lists the descriptors for supported channel types. The use of descriptors + // makes it possible for new channel types to be dynamically added. + ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) + // Gets a single channel descriptor. The descriptor indicates which fields + // are expected / permitted for a notification channel of the given type. + GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) + // Lists the notification channels that have been created for the project. + ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) + // Gets a single notification channel. The channel includes the relevant + // configuration details with which the channel was created. However, the + // response may truncate or omit passwords, API keys, or other private key + // matter and thus the response may not be 100% identical to the information + // that was supplied in the call to the create method. + GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) + // Creates a new notification channel, representing a single notification + // endpoint such as an email address, SMS number, or pagerduty service. + CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) + // Updates a notification channel. Fields not specified in the field mask + // remain unchanged. 
+ UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) + // Deletes a notification channel. + DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error) + // Causes a verification code to be delivered to the channel. The code + // can then be supplied in `VerifyNotificationChannel` to verify the channel. + SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error) + // Requests a verification code for an already verified channel that can then + // be used in a call to VerifyNotificationChannel() on a different channel + // with an equivalent identity in the same or in a different project. This + // makes it possible to copy a channel between projects without requiring + // manual reverification of the channel. If the channel is not in the + // verified state, this method will fail (in other words, this may only be + // used if the SendNotificationChannelVerificationCode and + // VerifyNotificationChannel paths have already been used to put the given + // channel into the verified state). + // + // There is no guarantee that the verification codes returned by this method + // will be of a similar structure or form as the ones that are delivered + // to the channel via SendNotificationChannelVerificationCode; while + // VerifyNotificationChannel() will recognize both the codes delivered via + // SendNotificationChannelVerificationCode() and returned from + // GetNotificationChannelVerificationCode(), it is typically the case that + // the verification codes delivered via + // SendNotificationChannelVerificationCode() will be shorter and also + // have a shorter expiration (e.g. codes such as "G-123456") whereas + // GetVerificationCode() will typically return a much longer, websafe base + // 64 encoded string that has a longer expiration time. + GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) + // Verifies a `NotificationChannel` by proving receipt of the code + // delivered to the channel as a result of calling + // `SendNotificationChannelVerificationCode`. 
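+ //
+ // Illustrative sketch of the send-then-verify flow against an
+ // implementation of this interface (a minimal sketch: `srv`, `name`,
+ // and `code` are placeholders, and the Name/Code request fields are
+ // assumptions drawn from the corresponding request messages):
+ //
+ //   _, err := srv.SendNotificationChannelVerificationCode(ctx,
+ //       &SendNotificationChannelVerificationCodeRequest{Name: name})
+ //   // ... the user retrieves the delivered code out of band ...
+ //   ch, err := srv.VerifyNotificationChannel(ctx,
+ //       &VerifyNotificationChannelRequest{Name: name, Code: code})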
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) +} + +func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) { + s.RegisterService(&_NotificationChannelService_serviceDesc, srv) +} + +func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelDescriptorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest)) + 
} + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationChannelVerificationCodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyNotificationChannelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.NotificationChannelService", + HandlerType: (*NotificationChannelServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListNotificationChannelDescriptors", + Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler, + }, + { + MethodName: "GetNotificationChannelDescriptor", + Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler, + }, + { + MethodName: "ListNotificationChannels", + Handler: _NotificationChannelService_ListNotificationChannels_Handler, + }, + { + MethodName: "GetNotificationChannel", + Handler: _NotificationChannelService_GetNotificationChannel_Handler, + }, + { + MethodName: "CreateNotificationChannel", + Handler: _NotificationChannelService_CreateNotificationChannel_Handler, + }, + { + MethodName: "UpdateNotificationChannel", + Handler: _NotificationChannelService_UpdateNotificationChannel_Handler, + }, + { + MethodName: "DeleteNotificationChannel", + Handler: _NotificationChannelService_DeleteNotificationChannel_Handler, + }, + { + MethodName: "SendNotificationChannelVerificationCode", + Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "GetNotificationChannelVerificationCode", + Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler, + }, + { + MethodName: "VerifyNotificationChannel", + Handler: _NotificationChannelService_VerifyNotificationChannel_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/notification_service.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go new file mode 100644 index 000000000..b42b8d9f7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go @@ -0,0 +1,99 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/span_context.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The context of a span, attached to google.api.Distribution.Exemplars +// in google.api.Distribution values during aggregation. +// +// It contains the name of a span with format: +// projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] +type SpanContext struct { + // The resource name of the span in the following format: + // + // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] + // + // [TRACE_ID] is a unique identifier for a trace within a project; + // it is a 32-character hexadecimal encoding of a 16-byte array. + // + // [SPAN_ID] is a unique identifier for a span within a trace; it + // is a 16-character hexadecimal encoding of an 8-byte array. + SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SpanContext) Reset() { *m = SpanContext{} } +func (m *SpanContext) String() string { return proto.CompactTextString(m) } +func (*SpanContext) ProtoMessage() {} +func (*SpanContext) Descriptor() ([]byte, []int) { + return fileDescriptor_933032e252f1c5e4, []int{0} +} + +func (m *SpanContext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SpanContext.Unmarshal(m, b) +} +func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic) +} +func (m *SpanContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SpanContext.Merge(m, src) +} +func (m *SpanContext) XXX_Size() int { + return xxx_messageInfo_SpanContext.Size(m) +} +func (m *SpanContext) XXX_DiscardUnknown() { + xxx_messageInfo_SpanContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SpanContext proto.InternalMessageInfo + +func (m *SpanContext) GetSpanName() string { + if m != nil { + return m.SpanName + } + return "" +} + +func init() { + proto.RegisterType((*SpanContext)(nil), "google.monitoring.v3.SpanContext") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/span_context.proto", fileDescriptor_933032e252f1c5e4) +} + +var fileDescriptor_933032e252f1c5e4 = []byte{ + // 197 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, + 0xd6, 0x2f, 0x2e, 0x48, 0xcc, 0x8b, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd4, 0x43, 0x28, 0xd4, 0x2b, 0x33, 0x56, 0xd2, 0xe2, + 0xe2, 0x0e, 0x2e, 0x48, 0xcc, 0x73, 0x86, 0x28, 0x15, 0x92, 0xe6, 0xe2, 0x04, 0x6b, 0xcd, 0x4b, + 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, + 0x3a, 0xad, 0x60, 0xe4, 0x92, 0x48, 0xce, 0xcf, 0xd5, 0xc3, 0x66, 0x90, 0x93, 0x00, 0x92, 0x31, + 0x01, 0x20, 
0x0b, 0x03, 0x18, 0xa3, 0xec, 0xa0, 0x2a, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, + 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0xce, 0xd1, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, + 0xa3, 0x3a, 0xdd, 0x1a, 0xc1, 0x5b, 0xc5, 0x24, 0xe5, 0x0e, 0x31, 0xc0, 0x39, 0x27, 0xbf, 0x34, + 0x45, 0xcf, 0x17, 0x61, 0x61, 0x98, 0xf1, 0x29, 0x98, 0x64, 0x0c, 0x58, 0x32, 0x06, 0x21, 0x19, + 0x13, 0x66, 0x9c, 0xc4, 0x06, 0xb6, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x19, 0x01, + 0xcb, 0x1e, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go new file mode 100644 index 000000000..f7dd41a43 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -0,0 +1,969 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/uptime.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + duration "github.com/golang/protobuf/ptypes/duration" + monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The regions from which an uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 +) + +var UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", +} + +var UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, +} + +func (x UptimeCheckRegion) String() string { + return proto.EnumName(UptimeCheckRegion_name, int32(x)) +} + +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{0} +} + +// The supported resource types that can be used as values of +// `group_resource.resource_type`. +// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types. +// The resource types `gae_app` and `uptime_url` are not valid here because +// group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances from Google Cloud Platform (GCP) or + // Amazon Web Services (AWS). 
+ GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of Amazon ELB load balancers. + GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +var GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", +} + +var GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, +} + +func (x GroupResourceType) String() string { + return proto.EnumName(GroupResourceType_name, int32(x)) +} + +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1} +} + +// Nimbus InternalCheckers. +type InternalChecker struct { + // The GCP project ID. Not necessarily the same as the project_id for the + // config. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // The internal network to perform this uptime check on. + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + // The GCP zone the uptime check should egress from. Only respected for + // internal uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,3,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"` + // The checker ID. + CheckerId string `protobuf:"bytes,4,opt,name=checker_id,json=checkerId,proto3" json:"checker_id,omitempty"` + // The checker's human-readable name. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalChecker) Reset() { *m = InternalChecker{} } +func (m *InternalChecker) String() string { return proto.CompactTextString(m) } +func (*InternalChecker) ProtoMessage() {} +func (*InternalChecker) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{0} +} + +func (m *InternalChecker) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InternalChecker.Unmarshal(m, b) +} +func (m *InternalChecker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InternalChecker.Marshal(b, m, deterministic) +} +func (m *InternalChecker) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalChecker.Merge(m, src) +} +func (m *InternalChecker) XXX_Size() int { + return xxx_messageInfo_InternalChecker.Size(m) +} +func (m *InternalChecker) XXX_DiscardUnknown() { + xxx_messageInfo_InternalChecker.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalChecker proto.InternalMessageInfo + +func (m *InternalChecker) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *InternalChecker) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *InternalChecker) GetGcpZone() string { + if m != nil { + return m.GcpZone + } + return "" +} + +func (m *InternalChecker) GetCheckerId() string { + if m != nil { + return m.CheckerId + } + return "" +} + +func (m *InternalChecker) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + // A unique resource name for this UptimeCheckConfig. The format is: + // + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. 
+ // + // This field should be omitted when creating the uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A human-friendly name for the uptime check configuration. The display name + // should be unique within a Stackdriver Account in order to make it easier + // to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are valid to be assigned to Resource: + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of uptime check request. + // + // Types that are valid to be assigned to CheckRequestType: + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often, in seconds, the uptime check is performed. + // Currently, the only supported values are `60s` (1 minute), `300s` + // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional, + // defaults to `300s`. + Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + // The expected content on the page the check is run against. + // Currently, only the first entry in the list is supported, and other entries + // will be ignored. The server will look for an exact match of the string in + // the page response's content. This field is optional and should only be + // specified if a content match is required. + ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"` + // The list of regions from which the check will be run. + // If this field is specified, enough regions to include a minimum of + // 3 locations must be provided, or an error message is returned. + // Not specifying this field will result in uptime checks running from all + // regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // Denotes whether this is a check that egresses from InternalCheckers. + IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"` + // The internal checkers that this check will egress from. If `is_internal` is + // true and this list is empty, the check will egress from all + // InternalCheckers configured for the project that owns this CheckConfig. 
+ InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} } +func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig) ProtoMessage() {} +func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1} +} + +func (m *UptimeCheckConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig.Unmarshal(m, b) +} +func (m *UptimeCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig.Merge(m, src) +} +func (m *UptimeCheckConfig) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig.Size(m) +} +func (m *UptimeCheckConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig proto.InternalMessageInfo + +func (m *UptimeCheckConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UptimeCheckConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} + +type UptimeCheckConfig_MonitoredResource struct { + MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"` +} + +type UptimeCheckConfig_ResourceGroup_ struct { + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} + +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource { + if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_HttpCheck_ struct { + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"` +} + +type UptimeCheckConfig_TcpCheck_ struct { + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"` +} + +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil 
+} + +func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetPeriod() *duration.Duration { + if m != nil { + return m.Period + } + return nil +} + +func (m *UptimeCheckConfig) GetTimeout() *duration.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if m != nil { + return m.ContentMatchers + } + return nil +} + +func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if m != nil { + return m.SelectedRegions + } + return nil +} + +func (m *UptimeCheckConfig) GetIsInternal() bool { + if m != nil { + return m.IsInternal + } + return false +} + +func (m *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker { + if m != nil { + return m.InternalCheckers + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*UptimeCheckConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UptimeCheckConfig_OneofMarshaler, _UptimeCheckConfig_OneofUnmarshaler, _UptimeCheckConfig_OneofSizer, []interface{}{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } +} + +func _UptimeCheckConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UptimeCheckConfig) + // resource + switch x := m.Resource.(type) { + case *UptimeCheckConfig_MonitoredResource: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MonitoredResource); err != nil { + return err + } + case *UptimeCheckConfig_ResourceGroup_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResourceGroup); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.Resource has unexpected type %T", x) + } + // check_request_type + switch x := m.CheckRequestType.(type) { + case *UptimeCheckConfig_HttpCheck_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpCheck); err != nil { + return err + } + case *UptimeCheckConfig_TcpCheck_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TcpCheck); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.CheckRequestType has unexpected type %T", x) + } + return nil +} + +func _UptimeCheckConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UptimeCheckConfig) + switch tag { + case 3: // resource.monitored_resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(monitoredres.MonitoredResource) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_MonitoredResource{msg} + return true, err + case 4: // resource.resource_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_ResourceGroup) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_ResourceGroup_{msg} + return true, err + case 5: // check_request_type.http_check + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_HttpCheck) + err := b.DecodeMessage(msg) + m.CheckRequestType 
= &UptimeCheckConfig_HttpCheck_{msg} + return true, err + case 6: // check_request_type.tcp_check + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_TcpCheck) + err := b.DecodeMessage(msg) + m.CheckRequestType = &UptimeCheckConfig_TcpCheck_{msg} + return true, err + default: + return false, nil + } +} + +func _UptimeCheckConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*UptimeCheckConfig) + // resource + switch x := m.Resource.(type) { + case *UptimeCheckConfig_MonitoredResource: + s := proto.Size(x.MonitoredResource) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *UptimeCheckConfig_ResourceGroup_: + s := proto.Size(x.ResourceGroup) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // check_request_type + switch x := m.CheckRequestType.(type) { + case *UptimeCheckConfig_HttpCheck_: + s := proto.Size(x.HttpCheck) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *UptimeCheckConfig_TcpCheck_: + s := proto.Size(x.TcpCheck) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The resource submessage for group checks. It can be used instead of a +// monitored resource, when multiple resources are being monitored. +type UptimeCheckConfig_ResourceGroup struct { + // The group of resources being monitored. Should be only the + // group_id, not projects//groups/. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"` + // The resource type of the group members. 
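+ //
+ // Illustrative sketch of a group check target (a minimal sketch; the
+ // group ID literal is a placeholder):
+ //
+ //   rg := &UptimeCheckConfig_ResourceGroup{
+ //       GroupId:      "my-group-id",
+ //       ResourceType: GroupResourceType_INSTANCE,
+ //   }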
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} } +func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} +func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 0} +} + +func (m *UptimeCheckConfig_ResourceGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Merge(m, src) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Size(m) +} +func (m *UptimeCheckConfig_ResourceGroup) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_ResourceGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_ResourceGroup proto.InternalMessageInfo + +func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string { + if m != nil { + return m.GroupId + } + return "" +} + +func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { + if m != nil { + return m.ResourceType + } + return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED +} + +// Information involved in an HTTP/HTTPS uptime check request. +type UptimeCheckConfig_HttpCheck struct { + // If true, use HTTPS instead of HTTP to run the check. + UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + // The path to the page to run the check against. Will be combined with the + // host (specified within the MonitoredResource) and port to construct the + // full URL. Optional (defaults to "/"). + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // The port to the page to run the check against. Will be combined with host + // (specified within the MonitoredResource) and path to construct the full + // URL. Optional (defaults to 80 without SSL, or 443 with SSL). + Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"` + // Boolean specifying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. + // On Get/List calls, if mask_headers is set to True then the headers + // will be obscured with ******. + MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"` + // The list of headers to send as part of the uptime check request.
+ // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). + // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + // The maximum number of headers allowed is 100. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} } +func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1} +} + +func (m *UptimeCheckConfig_HttpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.Merge(m, src) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if m != nil { + return m.UseSsl + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if m != nil { + return m.AuthInfo + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if m != nil { + return m.MaskHeaders + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +// A type of authentication to perform against the specified resource or URL +// that uses username and password. +// Currently, only Basic authentication is supported in Uptime Monitoring. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + // The username to authenticate. + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + // The password to authenticate. 
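+ //
+ // Illustrative sketch (a minimal sketch; the credential literals are
+ // placeholders):
+ //
+ //   auth := &UptimeCheckConfig_HttpCheck_BasicAuthentication{
+ //       Username: "monitor",
+ //       Password: "secret",
+ //   }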
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *m = UptimeCheckConfig_HttpCheck_BasicAuthentication{} +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return proto.CompactTextString(m) +} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1, 0} +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Merge(m, src) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Size(m) +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication proto.InternalMessageInfo + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Information required for a TCP uptime check request. +type UptimeCheckConfig_TcpCheck struct { + // The port to the page to run the check against. Will be combined with host + // (specified within the MonitoredResource) to construct the full URL. + // Required. 
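+ //
+ // Illustrative sketch of a minimal TCP check (the display name and
+ // port are placeholders; the required resource and timeout fields are
+ // omitted for brevity):
+ //
+ //   cfg := &UptimeCheckConfig{
+ //       DisplayName: "example-tcp-check",
+ //       CheckRequestType: &UptimeCheckConfig_TcpCheck_{
+ //           TcpCheck: &UptimeCheckConfig_TcpCheck{Port: 443},
+ //       },
+ //   }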
+ Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} } +func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 2} +} + +func (m *UptimeCheckConfig_TcpCheck) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.Merge(m, src) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Size(m) +} +func (m *UptimeCheckConfig_TcpCheck) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_TcpCheck.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_TcpCheck proto.InternalMessageInfo + +func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +// Used to perform string matching. Currently, this matches on the exact +// content. In the future, it can be expanded to allow for regular expressions +// and more complex matching. +type UptimeCheckConfig_ContentMatcher struct { + // String content to match (max 1024 bytes) + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} } +func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} +func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{1, 3} +} + +func (m *UptimeCheckConfig_ContentMatcher) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Unmarshal(m, b) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Marshal(b, m, deterministic) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Merge(m, src) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_Size() int { + return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Size(m) +} +func (m *UptimeCheckConfig_ContentMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckConfig_ContentMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckConfig_ContentMatcher proto.InternalMessageInfo + +func (m *UptimeCheckConfig_ContentMatcher) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + // A broad region category in which the IP address is located. 
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // The IP address from which the uptime check originates. This is a full + // IP address (not an IP address range). Most IP addresses, as of this + // publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely and should support + // interpreting this field in either IPv4 or IPv6 format. + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} } +func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckIp) ProtoMessage() {} +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { + return fileDescriptor_7ca0e36dfc8221d8, []int{2} +} + +func (m *UptimeCheckIp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UptimeCheckIp.Unmarshal(m, b) +} +func (m *UptimeCheckIp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UptimeCheckIp.Marshal(b, m, deterministic) +} +func (m *UptimeCheckIp) XXX_Merge(src proto.Message) { + xxx_messageInfo_UptimeCheckIp.Merge(m, src) +} +func (m *UptimeCheckIp) XXX_Size() int { + return xxx_messageInfo_UptimeCheckIp.Size(m) +} +func (m *UptimeCheckIp) XXX_DiscardUnknown() { + xxx_messageInfo_UptimeCheckIp.DiscardUnknown(m) +} + +var xxx_messageInfo_UptimeCheckIp proto.InternalMessageInfo + +func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if m != nil { + return m.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (m *UptimeCheckIp) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *UptimeCheckIp) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func init() { + proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value) + proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value) + proto.RegisterType((*InternalChecker)(nil), "google.monitoring.v3.InternalChecker") + proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig") + proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup") + proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck") + proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry") + proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication") + proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck") + proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher") + proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp") +} + +func init() { 
proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor_7ca0e36dfc8221d8) } + +var fileDescriptor_7ca0e36dfc8221d8 = []byte{ + // 1043 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xed, 0x6e, 0xe3, 0x44, + 0x17, 0x5e, 0x27, 0x6d, 0x3e, 0x4e, 0xfa, 0xe1, 0xce, 0xdb, 0x17, 0xdc, 0x48, 0x5d, 0xba, 0x45, + 0x88, 0xaa, 0x3f, 0x1c, 0xb6, 0x11, 0x08, 0x2d, 0xd2, 0x22, 0x27, 0x35, 0x8d, 0xa5, 0x36, 0x89, + 0x26, 0xcd, 0x02, 0x4b, 0x85, 0xe5, 0xda, 0x53, 0xc7, 0x34, 0xf1, 0x18, 0xcf, 0xb8, 0x4b, 0xb9, + 0x05, 0x2e, 0x83, 0x1f, 0x48, 0x5c, 0x01, 0xd7, 0xc0, 0x05, 0x70, 0x3d, 0xc8, 0xe3, 0x99, 0xb4, + 0x69, 0x8b, 0xb6, 0xfd, 0x37, 0xcf, 0xf9, 0x78, 0xe6, 0xf8, 0xcc, 0x79, 0x66, 0x0c, 0x2f, 0x42, + 0x4a, 0xc3, 0x29, 0x69, 0xcd, 0x68, 0x1c, 0x71, 0x9a, 0x46, 0x71, 0xd8, 0xba, 0x6a, 0xb7, 0xb2, + 0x84, 0x47, 0x33, 0x62, 0x26, 0x29, 0xe5, 0x14, 0x6d, 0x16, 0x21, 0xe6, 0x4d, 0x88, 0x79, 0xd5, + 0x6e, 0x7e, 0x2c, 0x13, 0xbd, 0x24, 0x52, 0xc9, 0x24, 0x70, 0x53, 0xc2, 0x68, 0x96, 0xfa, 0x32, + 0xb5, 0xf9, 0x5c, 0x06, 0x09, 0x74, 0x9e, 0x5d, 0xb4, 0x82, 0x2c, 0xf5, 0x78, 0x44, 0xe3, 0xc2, + 0xbf, 0xfb, 0x87, 0x06, 0xeb, 0x4e, 0xcc, 0x49, 0x1a, 0x7b, 0xd3, 0xee, 0x84, 0xf8, 0x97, 0x24, + 0x45, 0xdb, 0x00, 0x49, 0x4a, 0x7f, 0x22, 0x3e, 0x77, 0xa3, 0xc0, 0xd0, 0x76, 0xb4, 0xbd, 0x3a, + 0xae, 0x4b, 0x8b, 0x13, 0x20, 0x03, 0xaa, 0x31, 0xe1, 0xef, 0x68, 0x7a, 0x69, 0x94, 0x84, 0x4f, + 0x41, 0xb4, 0x05, 0xb5, 0xd0, 0x4f, 0xdc, 0x5f, 0x69, 0x4c, 0x8c, 0x72, 0xe1, 0x0a, 0xfd, 0xe4, + 0x2d, 0x8d, 0x49, 0xce, 0xe9, 0x17, 0xf4, 0x39, 0xe7, 0x52, 0xc1, 0x29, 0x2d, 0x4e, 0x80, 0x5e, + 0xc0, 0x4a, 0x10, 0xb1, 0x64, 0xea, 0x5d, 0xbb, 0xb1, 0x37, 0x23, 0xc6, 0xb2, 0x08, 0x68, 0x48, + 0x5b, 0xdf, 0x9b, 0x91, 0xdd, 0x7f, 0x1a, 0xb0, 0x31, 0x16, 0x5d, 0x11, 0x75, 0x76, 0x69, 0x7c, + 0x11, 0x85, 0x08, 0xc1, 0x92, 0x48, 0x28, 0xaa, 0x14, 0xeb, 0x7b, 0x64, 0xa5, 0x7b, 0x64, 0xa8, + 0x0f, 0xe8, 0x7e, 0xcb, 0x44, 0xcd, 0x8d, 0x83, 0x6d, 0x53, 0xb6, 0xdb, 0x4b, 0x22, 0xf3, 0x44, + 0x45, 0x61, 0x19, 0xd4, 0x7b, 0x86, 0x37, 0x66, 0x77, 0x8d, 0xe8, 0x47, 0x58, 0x53, 0x2c, 0x6e, + 0x98, 0xd2, 0x2c, 0x11, 0x9f, 0xd8, 0x38, 0xf8, 0xdc, 0x7c, 0xe8, 0xe8, 0xcc, 0x7b, 0xdf, 0x61, + 0x2a, 0xa6, 0xa3, 0x3c, 0xb9, 0xf7, 0x0c, 0xaf, 0xa6, 0xb7, 0x0d, 0x08, 0x03, 0x4c, 0x38, 0x4f, + 0x5c, 0xd1, 0x31, 0xd1, 0x9d, 0xc6, 0xc1, 0xcb, 0xc7, 0x72, 0xf7, 0x38, 0x4f, 0x04, 0xee, 0x69, + 0xb8, 0x3e, 0x51, 0x00, 0x0d, 0xa0, 0xce, 0x7d, 0x45, 0x59, 0x11, 0x94, 0x9f, 0x3d, 0x96, 0xf2, + 0xd4, 0x9f, 0x33, 0xd6, 0xb8, 0x5c, 0xa3, 0x97, 0x50, 0x49, 0x48, 0x1a, 0xd1, 0xc0, 0xa8, 0x0a, + 0xb6, 0x2d, 0xc5, 0xa6, 0x86, 0xcf, 0x3c, 0x94, 0xc3, 0x87, 0x65, 0x20, 0x6a, 0x43, 0x35, 0xa7, + 0xa6, 0x19, 0x37, 0x6a, 0xef, 0xcb, 0x51, 0x91, 0xc8, 0x03, 0xdd, 0xa7, 0x31, 0x27, 0x31, 0x77, + 0x67, 0x1e, 0xf7, 0x27, 0x24, 0x65, 0x46, 0x7d, 0xa7, 0xbc, 0xd7, 0x38, 0xf8, 0xe2, 0xb1, 0xf5, + 0x77, 0x8b, 0xfc, 0x93, 0x22, 0x1d, 0xaf, 0xfb, 0x0b, 0x98, 0x21, 0x0c, 0x3a, 0x23, 0x53, 0xe2, + 0x73, 0x31, 0x1e, 0x61, 0x44, 0x63, 0x66, 0xc0, 0x4e, 0x79, 0x6f, 0xed, 0xe0, 0xd3, 0xf7, 0x6e, + 0x81, 0x45, 0x3c, 0x5e, 0x57, 0x04, 0x05, 0x66, 0xe8, 0x23, 0x68, 0x44, 0xcc, 0x8d, 0xa4, 0xd8, + 0x8c, 0xf5, 0x1d, 0x6d, 0xaf, 0x86, 0x21, 0x62, 0x4a, 0x7e, 0x08, 0xc3, 0x86, 0xf2, 0xba, 0x52, + 0x1a, 0xcc, 0x58, 0x13, 0x1f, 0xf6, 0xc9, 0xc3, 0xbb, 0xde, 0x51, 0x2e, 0xd6, 0xa3, 0x45, 0x03, + 0x6b, 0xfe, 0x02, 0xab, 0x0b, 0xa3, 0x25, 0x34, 0x9a, 0x2f, 0x6e, 0xa4, 0x5d, 0x15, 0xd8, 0x09, + 0xd0, 0x31, 0xcc, 0xa7, 0xce, 
0xe5, 0xd7, 0x49, 0x21, 0x9c, 0xff, 0xfc, 0x62, 0x41, 0xa7, 0xb8, + 0x4f, 0xaf, 0x13, 0x82, 0x57, 0xd2, 0x5b, 0xa8, 0xf9, 0x57, 0x19, 0xea, 0xf3, 0xc9, 0x43, 0x1f, + 0x42, 0x35, 0x63, 0xc4, 0x65, 0x6c, 0x2a, 0x76, 0xad, 0xe1, 0x4a, 0xc6, 0xc8, 0x88, 0x4d, 0x73, + 0x01, 0x27, 0x1e, 0x9f, 0x48, 0x91, 0x8a, 0xb5, 0xb0, 0xd1, 0x94, 0x0b, 0x3d, 0x2e, 0x63, 0xb1, + 0x46, 0xe7, 0x50, 0xf7, 0x32, 0x3e, 0x71, 0xa3, 0xf8, 0x82, 0x4a, 0x71, 0xd9, 0x4f, 0x16, 0x80, + 0xd9, 0xf1, 0x58, 0xe4, 0x5b, 0x19, 0x9f, 0x90, 0x98, 0x47, 0x7e, 0x31, 0x57, 0xb5, 0x9c, 0xd7, + 0x89, 0x2f, 0x68, 0x7e, 0x71, 0xcc, 0x3c, 0x76, 0xe9, 0x4e, 0x88, 0x17, 0xe4, 0xbd, 0x5f, 0x16, + 0x95, 0x36, 0x72, 0x5b, 0xaf, 0x30, 0xa1, 0xef, 0xa0, 0xaa, 0xbc, 0x15, 0x71, 0x32, 0xaf, 0x9f, + 0x5e, 0x84, 0xe4, 0xb2, 0x63, 0x9e, 0x5e, 0x63, 0x45, 0xd7, 0x3c, 0x81, 0xff, 0x3d, 0x50, 0x1d, + 0x6a, 0x42, 0x2d, 0x63, 0xf9, 0x99, 0xce, 0x2f, 0xb9, 0x39, 0xce, 0x7d, 0x89, 0xc7, 0xd8, 0x3b, + 0x9a, 0x06, 0xb2, 0x7f, 0x73, 0xdc, 0x7c, 0x05, 0x2b, 0xb7, 0xf7, 0x41, 0x3a, 0x94, 0x2f, 0xc9, + 0xb5, 0xa4, 0xc8, 0x97, 0x68, 0x13, 0x96, 0xaf, 0xbc, 0x69, 0xa6, 0xee, 0xc7, 0x02, 0xbc, 0x2a, + 0x7d, 0xa9, 0x35, 0x9f, 0x43, 0x4d, 0x09, 0x7c, 0x7e, 0x16, 0xda, 0xcd, 0x59, 0x34, 0xf7, 0x61, + 0x6d, 0x51, 0x40, 0xf9, 0x9b, 0x20, 0x25, 0xa4, 0x86, 0x4a, 0xc2, 0x0e, 0x40, 0x4d, 0x8d, 0x45, + 0x67, 0x13, 0x90, 0x98, 0x6b, 0x37, 0x25, 0x3f, 0x67, 0x84, 0x71, 0x31, 0x65, 0xbb, 0xbf, 0x69, + 0xb0, 0x7a, 0xab, 0x5d, 0x4e, 0x82, 0xbe, 0x86, 0x4a, 0x21, 0x3a, 0x41, 0xf6, 0x04, 0xcd, 0xc9, + 0xb4, 0xbc, 0x31, 0x53, 0x5a, 0x34, 0x50, 0x35, 0x46, 0xe1, 0xfc, 0x25, 0x8a, 0x12, 0xd7, 0x0b, + 0x82, 0x94, 0x30, 0x26, 0x9f, 0xa9, 0x7a, 0x94, 0x58, 0x85, 0x61, 0x9f, 0x2c, 0xbc, 0x32, 0x05, + 0x2f, 0xfa, 0x00, 0x10, 0xb6, 0x8f, 0x9c, 0x41, 0xdf, 0x1d, 0xf7, 0x47, 0x43, 0xbb, 0xeb, 0x7c, + 0xe3, 0xd8, 0x87, 0xfa, 0x33, 0x54, 0x85, 0xf2, 0x78, 0x64, 0xe9, 0x1a, 0x02, 0xa8, 0xd8, 0x63, + 0x3c, 0x18, 0xda, 0x7a, 0x09, 0x6d, 0xc0, 0xea, 0x68, 0x30, 0x3e, 0xed, 0xb9, 0xd6, 0x89, 0x8d, + 0x9d, 0xae, 0xa5, 0x97, 0x91, 0x0e, 0x2b, 0xd6, 0xc8, 0xb1, 0xdc, 0xa1, 0x95, 0xa7, 0x76, 0xf5, + 0xa5, 0xfd, 0x1f, 0x60, 0xe3, 0x9e, 0x80, 0xd0, 0x36, 0x6c, 0x61, 0x7b, 0x34, 0x18, 0xe3, 0xae, + 0xed, 0x9e, 0x7e, 0x3f, 0xb4, 0xef, 0xec, 0xb6, 0x02, 0x35, 0xa7, 0x3f, 0x3a, 0xb5, 0xfa, 0x5d, + 0x5b, 0xd7, 0xd0, 0x16, 0xfc, 0xdf, 0xfa, 0x76, 0xe4, 0xda, 0xc7, 0x1d, 0xf7, 0x78, 0x60, 0x1d, + 0xba, 0x1d, 0xeb, 0x38, 0xf7, 0x60, 0xbd, 0xd4, 0xf9, 0x5d, 0x03, 0xc3, 0xa7, 0xb3, 0x07, 0xbb, + 0xd6, 0x69, 0x14, 0x9f, 0x37, 0xcc, 0xef, 0xd7, 0xa1, 0xf6, 0xf6, 0xb5, 0x0c, 0x0a, 0xe9, 0xd4, + 0x8b, 0x43, 0x93, 0xa6, 0x61, 0x2b, 0x24, 0xb1, 0xb8, 0x7d, 0x5b, 0x85, 0xcb, 0x4b, 0x22, 0xb6, + 0xf8, 0x77, 0xf2, 0xd5, 0x0d, 0xfa, 0xb3, 0xd4, 0x3c, 0x2a, 0x08, 0xba, 0x53, 0x9a, 0x05, 0xea, + 0xbd, 0xcc, 0xf7, 0x7a, 0xd3, 0xfe, 0x5b, 0x39, 0xcf, 0x84, 0xf3, 0xec, 0xc6, 0x79, 0xf6, 0xa6, + 0x7d, 0x5e, 0x11, 0x9b, 0xb4, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x01, 0x68, 0xd9, 0xde, 0x01, + 0x09, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go new file mode 100644 index 000000000..b2c363070 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -0,0 +1,793 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/monitoring/v3/uptime_service.proto + +package monitoring + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + empty "github.com/golang/protobuf/ptypes/empty" + context "golang.org/x/net/context" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + // The project whose uptime check configurations are listed. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} } +func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{0} +} + +func (m *ListUptimeCheckConfigsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsRequest.Merge(m, src) +} +func (m *ListUptimeCheckConfigsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsRequest.Size(m) +} +func (m *ListUptimeCheckConfigsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckConfigsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the 
`ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + // The returned uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // The total number of uptime check configurations for the project, + // irrespective of any pagination. + TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} } +func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{1} +} + +func (m *ListUptimeCheckConfigsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckConfigsResponse.Merge(m, src) +} +func (m *ListUptimeCheckConfigsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckConfigsResponse.Size(m) +} +func (m *ListUptimeCheckConfigsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckConfigsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckConfigsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfigs + } + return nil +} + +func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func (m *ListUptimeCheckConfigsResponse) GetTotalSize() int32 { + if m != nil { + return m.TotalSize + } + return 0 +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + // The uptime check configuration to retrieve. The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. 
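+ // For illustration (with hypothetical project and check IDs), such a name
+ // could be assembled as
+ //
+ //     name := fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", "my-project", "my-check")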
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} } +func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{2} +} + +func (m *GetUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *GetUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *GetUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetUptimeCheckConfigRequest.Merge(m, src) +} +func (m *GetUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_GetUptimeCheckConfigRequest.Size(m) +} +func (m *GetUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *GetUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + // The project in which to create the uptime check. The format + // is `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // The new uptime check configuration. 
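+ // A minimal create request might be sketched as follows (illustrative; cfg
+ // stands for an assumed, fully populated *UptimeCheckConfig):
+ //
+ //     req := &CreateUptimeCheckConfigRequest{
+ //             Parent:            "projects/my-project", // hypothetical project
+ //             UptimeCheckConfig: cfg,
+ //     }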
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} }
+func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{3}
+}
+
+func (m *CreateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.Merge(m, src)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Size(m)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *CreateUptimeCheckConfigRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if m != nil {
+ return m.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `UpdateUptimeCheckConfig` request.
+type UpdateUptimeCheckConfigRequest struct {
+ // Optional. If present, only the listed fields in the current uptime check
+ // configuration are updated with values from the new configuration. If this
+ // field is empty, then the current configuration is completely replaced with
+ // the new configuration.
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. If an `"updateMask"` has been specified, this field gives
+ // the values for the set of fields mentioned in the `"updateMask"`. If an
+ // `"updateMask"` has not been given, this uptime check configuration replaces
+ // the current configuration. If a field is mentioned in `"updateMask"` but
+ // the corresponding field is omitted in this partial uptime check
+ // configuration, it has the effect of deleting/clearing the field from the
+ // configuration on the server.
+ //
+ // The following fields can be updated: `display_name`,
+ // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and
+ // `selected_regions`.
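+ // For example (an illustrative sketch; cfg is assumed to carry the new
+ // values), an update limited to the timeout could be expressed as
+ //
+ //     req := &UpdateUptimeCheckConfigRequest{
+ //             UpdateMask:        &field_mask.FieldMask{Paths: []string{"timeout"}},
+ //             UptimeCheckConfig: cfg,
+ //     }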
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} } +func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{4} +} + +func (m *UpdateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.Merge(m, src) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Size(m) +} +func (m *UpdateUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + // The uptime check configuration to delete. The format + // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} } +func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{5} +} + +func (m *DeleteUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Unmarshal(m, b) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Marshal(b, m, deterministic) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.Merge(m, src) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Size(m) +} +func (m *DeleteUptimeCheckConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUptimeCheckConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUptimeCheckConfigRequest proto.InternalMessageInfo + +func (m *DeleteUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. +type ListUptimeCheckIpsRequest struct { + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. 
+ // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} } +func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{6} +} + +func (m *ListUptimeCheckIpsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckIpsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsRequest.Merge(m, src) +} +func (m *ListUptimeCheckIpsRequest) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsRequest.Size(m) +} +func (m *ListUptimeCheckIpsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckIpsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsRequest proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckIpsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} } +func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6222dd2aa0db8eee, []int{7} +} + +func (m *ListUptimeCheckIpsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Unmarshal(m, b) +} +func (m *ListUptimeCheckIpsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Marshal(b, m, deterministic) +} +func (m *ListUptimeCheckIpsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListUptimeCheckIpsResponse.Merge(m, src) +} +func (m *ListUptimeCheckIpsResponse) XXX_Size() int { + return xxx_messageInfo_ListUptimeCheckIpsResponse.Size(m) +} +func (m *ListUptimeCheckIpsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListUptimeCheckIpsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListUptimeCheckIpsResponse proto.InternalMessageInfo + +func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if m != nil { + return m.UptimeCheckIps + } + return nil +} + +func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest") + proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse") + proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest") + proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest") + proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest") + proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest") + proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest") + proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse") +} + +func init() { + proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor_6222dd2aa0db8eee) +} + +var fileDescriptor_6222dd2aa0db8eee = []byte{ + // 747 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xd6, 0x24, 0xbd, 0x55, 0x7b, 0xaa, 0x7b, 0x2f, 0x0c, 0x51, 0x1b, 0x5c, 0x1a, 0x05, 0x23, + 0x41, 0x89, 0x90, 0x4d, 0x93, 0xae, 0xa8, 0xa8, 0x44, 0x03, 0x54, 0x95, 0xa8, 0x54, 0xa5, 0xb4, + 0x15, 0x50, 0x29, 0x72, 0xd3, 0xa9, 0x31, 0x49, 0x3c, 0xc6, 0x33, 0xae, 0xa0, 0xa8, 0x1b, 0xde, + 0x00, 0x75, 0xc9, 0x9e, 0x45, 0x1f, 0x00, 0xd6, 0xb0, 0x41, 0x62, 0x8b, 0x78, 0x03, 0x1e, 0x04, + 0x79, 0x3c, 0x26, 0x7f, 0x63, 0xe3, 0x88, 0x5d, 0x3c, 0xe7, 0xcc, 0x39, 0xdf, 0xf9, 0xfc, 0x9d, + 0x2f, 0x86, 0x9b, 0x36, 0xa5, 0x76, 0x87, 0x98, 0x5d, 0xea, 0x3a, 0x9c, 0xfa, 0x8e, 0x6b, 0x9b, + 0xc7, 0x35, 0x33, 0xf0, 0xb8, 0xd3, 
0x25, 0x4d, 0x46, 0xfc, 0x63, 0xa7, 0x45, 0x0c, 0xcf, 0xa7, + 0x9c, 0xe2, 0x42, 0x94, 0x6a, 0xf4, 0x52, 0x8d, 0xe3, 0x9a, 0x76, 0x45, 0x16, 0xb0, 0x3c, 0xc7, + 0xb4, 0x5c, 0x97, 0x72, 0x8b, 0x3b, 0xd4, 0x65, 0xd1, 0x1d, 0xed, 0x6a, 0x4a, 0x79, 0x99, 0x32, + 0x2f, 0x53, 0xc4, 0xd3, 0x41, 0x70, 0x64, 0x92, 0xae, 0xc7, 0x5f, 0xcb, 0x60, 0x79, 0x38, 0x78, + 0xe4, 0x90, 0xce, 0x61, 0xb3, 0x6b, 0xb1, 0x76, 0x94, 0xa1, 0x33, 0x58, 0x78, 0xe4, 0x30, 0xbe, + 0x23, 0x4a, 0xd6, 0x9f, 0x93, 0x56, 0xbb, 0x4e, 0xdd, 0x23, 0xc7, 0x66, 0x0d, 0xf2, 0x32, 0x20, + 0x8c, 0xe3, 0x59, 0x98, 0xf4, 0x2c, 0x9f, 0xb8, 0xbc, 0x88, 0xca, 0x68, 0x71, 0xba, 0x21, 0x9f, + 0xf0, 0x3c, 0x4c, 0x7b, 0x96, 0x4d, 0x9a, 0xcc, 0x39, 0x21, 0xc5, 0x7c, 0x19, 0x2d, 0xfe, 0xd3, + 0x98, 0x0a, 0x0f, 0xb6, 0x9d, 0x13, 0x82, 0x17, 0x00, 0x44, 0x90, 0xd3, 0x36, 0x71, 0x8b, 0x13, + 0xe2, 0xa2, 0x48, 0x7f, 0x1c, 0x1e, 0xe8, 0x5f, 0x10, 0x94, 0x92, 0xba, 0x32, 0x8f, 0xba, 0x8c, + 0xe0, 0x27, 0x50, 0x90, 0x2c, 0xb6, 0xc2, 0x70, 0xb3, 0x15, 0xc5, 0x8b, 0xa8, 0x9c, 0x5f, 0x9c, + 0xa9, 0xde, 0x30, 0x54, 0x64, 0x1a, 0x23, 0xf5, 0x1a, 0x38, 0x18, 0x69, 0x81, 0xaf, 0xc3, 0xff, + 0x2e, 0x79, 0xc5, 0x9b, 0x7d, 0x08, 0x73, 0x02, 0xe1, 0xbf, 0xe1, 0xf1, 0x56, 0x8c, 0x32, 0x1c, + 0x82, 0x53, 0x6e, 0x75, 0xfa, 0x47, 0x9c, 0x16, 0x27, 0xe1, 0x8c, 0xfa, 0x12, 0xcc, 0xaf, 0x93, + 0xd1, 0x11, 0x62, 0xde, 0x30, 0x4c, 0xb8, 0x56, 0x97, 0x48, 0xd6, 0xc4, 0x6f, 0xfd, 0x1d, 0x82, + 0x52, 0xdd, 0x27, 0x16, 0x27, 0x89, 0xd7, 0x92, 0xe8, 0xde, 0x83, 0x4b, 0x0a, 0x3e, 0x04, 0xf0, + 0x31, 0xe8, 0xb8, 0x38, 0x42, 0x87, 0xfe, 0x11, 0x41, 0x69, 0xc7, 0x3b, 0x4c, 0xc3, 0xb4, 0x02, + 0x33, 0x81, 0xc8, 0x10, 0xc2, 0x91, 0x3d, 0xb5, 0xb8, 0x67, 0xac, 0x2d, 0xe3, 0x61, 0xa8, 0xad, + 0x4d, 0x8b, 0xb5, 0x1b, 0x10, 0xa5, 0x87, 0xbf, 0x93, 0x80, 0xe7, 0xff, 0x1a, 0xf8, 0x32, 0x94, + 0xee, 0x93, 0x0e, 0x49, 0xc1, 0xad, 0x7a, 0x05, 0x7b, 0x70, 0x79, 0x48, 0x79, 0x1b, 0xde, 0x6f, + 0xad, 0x0f, 0x68, 0x3a, 0x97, 0xaa, 0xe9, 0xfc, 0xb0, 0xa6, 0xcf, 0x10, 0x68, 0xaa, 0xca, 0x52, + 0xcf, 0x9b, 0x70, 0x61, 0x80, 0x06, 0xc7, 0x8b, 0xb5, 0x7c, 0xed, 0x8f, 0x1c, 0x6c, 0x78, 0x8d, + 0xff, 0x82, 0x81, 0xb2, 0x59, 0x35, 0x5c, 0xfd, 0x3a, 0x05, 0xb8, 0xaf, 0xd2, 0x76, 0xe4, 0x48, + 0xf8, 0x13, 0x82, 0x59, 0xf5, 0x02, 0xe2, 0x9a, 0x1a, 0x4e, 0xaa, 0x49, 0x68, 0xcb, 0xe3, 0x5d, + 0x8a, 0x38, 0xd1, 0xab, 0x6f, 0xbf, 0xff, 0x3c, 0xcb, 0xdd, 0xc2, 0x95, 0xd0, 0xd4, 0xde, 0x44, + 0x42, 0xbf, 0xeb, 0xf9, 0xf4, 0x05, 0x69, 0x71, 0x66, 0x56, 0x4e, 0x4d, 0xc5, 0xf2, 0x7e, 0x40, + 0x50, 0x50, 0xad, 0x1d, 0x5e, 0x52, 0x43, 0x48, 0x59, 0x51, 0x2d, 0xab, 0xfa, 0x86, 0x80, 0x86, + 0x3a, 0xea, 0x83, 0xa9, 0x40, 0x69, 0x56, 0x4e, 0xf1, 0x67, 0x04, 0x73, 0x09, 0xbb, 0x8e, 0x13, + 0xe8, 0x4a, 0xb7, 0x86, 0xec, 0x70, 0xd7, 0x05, 0xdc, 0x7b, 0xfa, 0x18, 0xbc, 0xde, 0x51, 0x2d, + 0x29, 0xfe, 0x81, 0x60, 0x2e, 0xc1, 0x1b, 0x92, 0x66, 0x48, 0xb7, 0x92, 0xec, 0x33, 0x3c, 0x13, + 0x33, 0xec, 0x54, 0x57, 0xc5, 0x0c, 0x0a, 0x70, 0x46, 0xa6, 0xd7, 0xa0, 0x9e, 0xeb, 0x3d, 0x82, + 0xb9, 0x04, 0xef, 0x48, 0x9a, 0x2b, 0xdd, 0x6a, 0xb4, 0xd9, 0x11, 0x37, 0x7c, 0x10, 0xfe, 0x0d, + 0xc7, 0xca, 0xa9, 0x8c, 0xa3, 0x9c, 0x33, 0x04, 0x78, 0xd4, 0x49, 0xb0, 0x99, 0x69, 0xc7, 0x7a, + 0x6e, 0xa6, 0xdd, 0xce, 0x7e, 0x41, 0x2e, 0xa4, 0x26, 0xd0, 0x16, 0x30, 0xee, 0x7d, 0x65, 0xc4, + 0x39, 0x6b, 0xe7, 0x08, 0x8a, 0x2d, 0xda, 0x55, 0xd6, 0x5c, 0x93, 0x1e, 0x23, 0xed, 0x65, 0x2b, + 0xe4, 0x60, 0x0b, 0x3d, 0x5d, 0x95, 0xb9, 0x36, 0xed, 0x58, 0xae, 0x6d, 0x50, 0xdf, 0x36, 0x6d, + 0xe2, 0x0a, 0x86, 0xcc, 0x28, 0x64, 0x79, 0x0e, 0x1b, 0xfc, 
0xb8, 0x59, 0xe9, 0x3d, 0x9d, 0xe7, + 0xb4, 0xf5, 0xa8, 0x40, 0xbd, 0x43, 0x83, 0x43, 0x63, 0xb3, 0xd7, 0x72, 0xb7, 0xf6, 0x2d, 0x0e, + 0xee, 0x8b, 0xe0, 0x7e, 0x2f, 0xb8, 0xbf, 0x5b, 0x3b, 0x98, 0x14, 0x4d, 0x6a, 0xbf, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x27, 0xb8, 0x65, 0x92, 0x9f, 0x09, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UptimeCheckServiceClient is the client API for UptimeCheckService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UptimeCheckServiceClient interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. + GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc *grpc.ClientConn +} + +func NewUptimeCheckServiceClient(cc *grpc.ClientConn) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...) 
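+ // A non-nil error from Invoke above is a gRPC status error describing why
+ // the unary call failed; out should only be used when err is nil.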
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UptimeCheckServiceServer is the server API for UptimeCheckService service. +type UptimeCheckServiceServer interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. 
+ DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: _UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..d13bcbaf4 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. 
It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
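+ // For illustration (a sketch, not generated code), a Status without details
+ // might be built as
+ //
+ //     st := &Status{Code: 5, Message: "resource not found"}
+ //
+ // where 5 is the NOT_FOUND value of [google.rpc.Code][google.rpc.Code].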
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_24d244abaf643bfe, []int{0} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } + +var fileDescriptor_24d244abaf643bfe = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go new file mode 100644 index 000000000..3acaf97bd --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go @@ -0,0 +1,281 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package field_mask + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//     paths: "f.a"
+//     paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       y : 13
+//     }
+//     z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//       }
+//     }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API. In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing
+// sub-message in the target resource.
+//
+// For example, given the target message:
+//
+//     f {
+//       b {
+//         d: 1
+//         x: 2
+//       }
+//       c: [1]
+//     }
+//
+// And an update message:
+//
+//     f {
+//       b {
+//         d: 10
+//       }
+//       c: [2]
+//     }
+//
+// then if the field mask is:
+//
+//     paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//     f {
+//       b {
+//         d: 10
+//         x: 2
+//       }
+//       c: [1, 2]
+//     }
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields had been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//     message Profile {
+//       User user = 1;
+//       Photo photo = 2;
+//     }
+//     message User {
+//       string display_name = 1;
+//       string address = 2;
+//     }
+//
+// In proto a field mask for `Profile` may look like this:
+//
+//     mask {
+//       paths: "user.display_name"
+//       paths: "photo"
+//     }
+//
+// In JSON, the same mask is represented as below:
+//
+//     {
+//       mask: "user.displayName,photo"
+//     }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//     message SampleMessage {
+//       oneof test_oneof {
+//         string name = 4;
+//         SubMessage sub_message = 9;
+//       }
+//     }
+//
+// The field mask can be:
+//
+//     mask {
+//       paths: "name"
+//     }
+//
+// Or:
+//
+//     mask {
+//       paths: "sub_message"
+//     }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
+type FieldMask struct {
+	// The set of field mask paths.
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} + +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldMask.Unmarshal(m, b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return xxx_messageInfo_FieldMask.Size(m) +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c, + 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01, + 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, + 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, + 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 000000000..fa31565fd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,38 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
+
+package grpc
+
+import (
+	"time"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultBackoffConfig = BackoffConfig{
+	MaxDelay: 120 * time.Second,
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+type BackoffConfig struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
new file mode 100644
index 000000000..5aeb646d1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -0,0 +1,391 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"net"
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
+)
+
+// Address represents a server the client connects to.
+//
+// Deprecated: please use package balancer.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	Metadata interface{}
+}
+
+// BalancerConfig specifies the configurations for Balancer.
+//
+// Deprecated: please use package balancer.
+type BalancerConfig struct {
+	// DialCreds is the transport credential the Balancer implementation can
+	// use to dial to a remote load balancer server. The Balancer implementations
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. The Balancer implementations
+	// can ignore this if it doesn't need to talk to a remote balancer.
+ Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. 
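+//
+// A minimal usage sketch (r is any naming.Resolver the caller already has;
+// the target string and error handling are illustrative placeholders):
+//
+//	b := grpc.RoundRobin(r)
+//	conn, err := grpc.Dial(target, grpc.WithBalancer(b))
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	defer conn.Close()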
+func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. 
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..069feb1e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,274 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. 
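+//
+// A custom balancer is plugged in by registering a Builder at init time
+// (a sketch; myBuilder is a hypothetical type implementing Builder):
+//
+//	func init() {
+//		balancer.Register(&myBuilder{})
+//	}
+//
+// It can later be looked up by name with Get; names are compared
+// case-insensitively.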
+package balancer
+
+import (
+	"errors"
+	"net"
+	"strings"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/resolver"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+)
+
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[strings.ToLower(b.Name())] = b
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the compare is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+	if b, ok := m[strings.ToLower(name)]; ok {
+		return b
+	}
+	return nil
+}
+
+// SubConn represents a gRPC sub connection.
+// Each sub connection contains a list of addresses. gRPC will
+// try to connect to them (in sequence), and stop trying the
+// remainder once one connection is successful.
+//
+// The reconnect backoff will be applied on the list, not a single address.
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// the connecting, Balancers must call Connect.
+// When the connection encounters an error, it will reconnect immediately.
+// When the connection becomes IDLE, it will not reconnect unless Connect is
+// called.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For the situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+}
+
+// NewSubConnOptions contains options to create a new SubConn.
+type NewSubConnOptions struct{}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For the situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+	// NewSubConn is called by balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+	// RemoveSubConn removes the SubConn from ClientConn.
+	// The SubConn will be shut down.
+	RemoveSubConn(SubConn)
+
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
+	// state in balancer has changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call pick
+	// on the new picker to pick new SubConn.
+	UpdateBalancerState(s connectivity.State, p Picker)
+
+	// ResolveNow is called by balancer to notify gRPC to do a name resolution.
+	ResolveNow(resolver.ResolveNowOption)
+
+	// Target returns the dial target for this ClientConn.
+	Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+	// DialCreds is the transport credential the Balancer implementation can
+	// use to dial to a remote load balancer server. The Balancer implementations
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. The Balancer implementations
+	// can ignore this if it doesn't need to talk to a remote balancer.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// ChannelzParentID is the entity parent's channelz unique identification number.
+	ChannelzParentID int64
+}
+
+// Builder creates a balancer.
+type Builder interface {
+	// Build creates a new balancer with the ClientConn.
+	Build(cc ClientConn, opts BuildOptions) Balancer
+	// Name returns the name of balancers built by this builder.
+	// It will be used to pick balancers (for example in service config).
+	Name() string
+}
+
+// PickOptions contains additional information for the Pick operation.
+type PickOptions struct {
+	// FullMethodName is the method name that NewClientStream() is called
+	// with. The canonical format is /service/Method.
+	FullMethodName string
+}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+	// Err is the rpc error the RPC finished with. It could be nil.
+	Err error
+	// BytesSent indicates if any bytes have been sent to the server.
+	BytesSent bool
+	// BytesReceived indicates if any bytes have been received from the server.
+	BytesReceived bool
+}
+
+var (
+	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
+	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	ErrNoSubConnAvailable = errors.New("no SubConn is available")
+	// ErrTransientFailure indicates all SubConns are in TransientFailure.
+	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type Picker interface {
+	// Pick returns the SubConn to be used to send the RPC.
+	// The returned SubConn must be one returned by NewSubConn().
+	//
+	// This function is expected to return:
+	// - a SubConn that is known to be READY;
+	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
+	//   made (for example, some SubConn is in CONNECTING mode);
+	// - other errors if no active connecting is happening (for example, all SubConn
+	//   are in TRANSIENT_FAILURE mode).
+	//
+	// If a SubConn is returned:
+	// - If it is READY, gRPC will send the RPC on it;
+	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
+	//   until UpdateBalancerState() is called and will call pick on the new picker.
+	//
+	// If the returned error is not nil:
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
+	// - If the error is ErrTransientFailure:
+	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
+	//     is called to pick again;
+	//   - Otherwise, RPC will fail with unavailable error.
+	// - Else (error is other non-nil error):
+	//   - The RPC will fail with unavailable error.
+	//
+	// The returned done() function will be called once the rpc has finished, with the
+	// final status of that RPC.
+	// done may be nil if balancer doesn't care about the RPC status.
+	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+//
+// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
+//
+// HandleSubConnStateChange, HandleResolvedAddrs and Close are guaranteed
+// to be called synchronously from the same goroutine.
+// There is no such guarantee for picker.Pick; it may be called at any time.
+type Balancer interface {
+	// HandleSubConnStateChange is called by gRPC when the connectivity state
+	// of sc has changed.
+	// Balancer is expected to aggregate all the state of SubConn and report
+	// that back to gRPC.
+	// Balancer should also generate and update Pickers when its internal state has
+	// been changed by the new state.
+	HandleSubConnStateChange(sc SubConn, state connectivity.State)
+	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
+	// balancers.
+	// Balancer can create new SubConn or remove SubConn with the addresses.
+	// An empty address slice and a non-nil error will be passed if the resolver returns
+	// non-nil error to gRPC.
+	HandleResolvedAddrs([]resolver.Address, error)
+	// Close closes the balancer. The balancer is not required to call
+	// ClientConn.RemoveSubConn for its existing SubConns.
+	Close()
+}
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+// - If at least one SubConn is in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn is in Connecting, the aggregated state is Connecting;
+// - Else the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		}
+	}
+
+	// Evaluate.
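+	// Precedence, matching the rules in the method comment above: any Ready
+	// SubConn makes the aggregate Ready; else any Connecting one makes it
+	// Connecting; otherwise the aggregate is TransientFailure.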
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..23d13511b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: NewErrPicker(balancer.ErrNoSubConnAvailable), + } +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. 
The picker is +// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + return + } + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. 
+ for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..012ace2f2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..2eda0a1c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. The roundrobin balancer is
+// installed as one of the default balancers in gRPC; users don't need to
+// explicitly install this balancer.
+package roundrobin
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
+	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+	var scs []balancer.SubConn
+	for _, sc := range readySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round robin
+	// selection from it and return the selected SubConn.
+	subConns []balancer.SubConn
+
+	mu   sync.Mutex
+	next int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	if len(p.subConns) <= 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	p.mu.Lock()
+	sc := p.subConns[p.next]
+	p.next = (p.next + 1) % len(p.subConns)
+	p.mu.Unlock()
+	return sc, nil, nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 000000000..c23f81706
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,300 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+// scStateUpdate contains the subConn and the new state it changed to.
+type scStateUpdate struct {
+	sc    balancer.SubConn
+	state connectivity.State
+}
+
+// scStateUpdateBuffer is an unbounded channel for scStateUpdate values.
+// TODO make a general purpose buffer that uses interface{}.
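+//
+// Intended consumption pattern (a sketch mirroring watcher below; buf is a
+// placeholder variable): receive from get(), then call load() to move any
+// backlogged update onto the channel:
+//
+//	t := <-buf.get()
+//	buf.load()
+//	// ... handle t ...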
+type scStateUpdateBuffer struct {
+	c       chan *scStateUpdate
+	mu      sync.Mutex
+	backlog []*scStateUpdate
+}
+
+func newSCStateUpdateBuffer() *scStateUpdateBuffer {
+	return &scStateUpdateBuffer{
+		c: make(chan *scStateUpdate, 1),
+	}
+}
+
+func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+}
+
+func (b *scStateUpdateBuffer) load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+}
+
+// get returns the channel that the scStateUpdate will be sent to.
+//
+// Upon receiving, the caller should call load to send another
+// scStateUpdate onto the channel if there is any.
+func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
+	return b.c
+}
+
+// resolverUpdate contains the new resolved addresses or error if there's
+// any.
+type resolverUpdate struct {
+	addrs []resolver.Address
+	err   error
+}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+	cc               *ClientConn
+	balancer         balancer.Balancer
+	stateChangeQueue *scStateUpdateBuffer
+	resolverUpdateCh chan *resolverUpdate
+	done             chan struct{}
+
+	mu       sync.Mutex
+	subConns map[*acBalancerWrapper]struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:               cc,
+		stateChangeQueue: newSCStateUpdateBuffer(),
+		resolverUpdateCh: make(chan *resolverUpdate, 1),
+		done:             make(chan struct{}),
+		subConns:         make(map[*acBalancerWrapper]struct{}),
+	}
+	go ccb.watcher()
+	ccb.balancer = b.Build(ccb, bopts)
+	return ccb
+}
+
+// watcher runs balancer functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() {
+	for {
+		select {
+		case t := <-ccb.stateChangeQueue.get():
+			ccb.stateChangeQueue.load()
+			select {
+			case <-ccb.done:
+				ccb.balancer.Close()
+				return
+			default:
+			}
+			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+		case t := <-ccb.resolverUpdateCh:
+			select {
+			case <-ccb.done:
+				ccb.balancer.Close()
+				return
+			default:
+			}
+			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
+		case <-ccb.done:
+		}
+
+		select {
+		case <-ccb.done:
+			ccb.balancer.Close()
+			ccb.mu.Lock()
+			scs := ccb.subConns
+			ccb.subConns = nil
+			ccb.mu.Unlock()
+			for acbw := range scs {
+				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+			}
+			return
+		default:
+		}
+	}
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	close(ccb.done)
+}
+
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	// When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be tearDown() and a new ac will be
+	// created. tearDown() generates a state change with Shutdown state; we
+	// don't want the balancer to receive this state change. So before
+	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
+	// this function will be called with (nil, Shutdown). We don't need to call
+	// balancer method in this case.
+ if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 000000000..e0ce32cfb --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,328 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+type balancerWrapperBuilder struct {
+ b Balancer // The v1 balancer.
+}
+
+func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ targetAddr := cc.Target()
+ targetSplitted := strings.Split(targetAddr, ":///")
+ if len(targetSplitted) >= 2 {
+ targetAddr = targetSplitted[1]
+ }
+
+ bwb.b.Start(targetAddr, BalancerConfig{
+ DialCreds: opts.DialCreds,
+ Dialer: opts.Dialer,
+ })
+ _, pickfirst := bwb.b.(*pickFirst)
+ bw := &balancerWrapper{
+ balancer: bwb.b,
+ pickfirst: pickfirst,
+ cc: cc,
+ targetAddr: targetAddr,
+ startCh: make(chan struct{}),
+ conns: make(map[resolver.Address]balancer.SubConn),
+ connSt: make(map[balancer.SubConn]*scState),
+ csEvltr: &balancer.ConnectivityStateEvaluator{},
+ state: connectivity.Idle,
+ }
+ cc.UpdateBalancerState(connectivity.Idle, bw)
+ go bw.lbWatcher()
+ return bw
+}
+
+func (bwb *balancerWrapperBuilder) Name() string {
+ return "wrapper"
+}
+
+type scState struct {
+ addr Address // The v1 address type.
+ s connectivity.State
+ down func(error)
+}
+
+type balancerWrapper struct {
+ balancer Balancer // The v1 balancer.
+ pickfirst bool
+
+ cc balancer.ClientConn
+ targetAddr string // Target without the scheme.
+
+ mu sync.Mutex
+ conns map[resolver.Address]balancer.SubConn
+ connSt map[balancer.SubConn]*scState
+ // This channel is closed when handling the first resolver result.
+ // lbWatcher blocks until this is closed, to avoid a race between
+ // - NewSubConn is created, cc wants to notify balancer of state changes;
+ // - Build hasn't returned, cc doesn't have access to balancer.
+ startCh chan struct{}
+
+ // To aggregate the connectivity state.
+ csEvltr *balancer.ConnectivityStateEvaluator
+ state connectivity.State
+}
+
+// lbWatcher watches the Notify channel of the balancer and manages
+// connections accordingly.
+func (bw *balancerWrapper) lbWatcher() {
+ <-bw.startCh
+ notifyCh := bw.balancer.Notify()
+ if notifyCh == nil {
+ // There's no resolver in the balancer. Connect directly.
+ a := resolver.Address{
+ Addr: bw.targetAddr,
+ Type: resolver.Backend,
+ }
+ sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+ } else {
+ bw.mu.Lock()
+ bw.conns[a] = sc
+ bw.connSt[sc] = &scState{
+ addr: Address{Addr: bw.targetAddr},
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ return
+ }
+
+ for addrs := range notifyCh {
+ grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
+ if bw.pickfirst {
+ var (
+ oldA resolver.Address
+ oldSC balancer.SubConn
+ )
+ bw.mu.Lock()
+ for oldA, oldSC = range bw.conns {
+ break
+ }
+ bw.mu.Unlock()
+ if len(addrs) <= 0 {
+ if oldSC != nil {
+ // Tear down the old sc.
+ bw.mu.Lock()
+ delete(bw.conns, oldA)
+ delete(bw.connSt, oldSC)
+ bw.mu.Unlock()
+ bw.cc.RemoveSubConn(oldSC)
+ }
+ continue
+ }
+
+ var newAddrs []resolver.Address
+ for _, a := range addrs {
+ newAddr := resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend, // All addresses from the balancer are backends.
+ ServerName: "",
+ Metadata: a.Metadata,
+ }
+ newAddrs = append(newAddrs, newAddr)
+ }
+ if oldSC == nil {
+ // Create new sc.
+ sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
+ } else {
+ bw.mu.Lock()
+ // For pickfirst, there should be only one SubConn, so the
+ // address doesn't matter. All state updates (up and down)
+ // and picking should happen on that single SubConn.
+ bw.conns[resolver.Address{}] = sc
+ bw.connSt[sc] = &scState{
+ addr: addrs[0], // Use the first address.
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ } else {
+ bw.mu.Lock()
+ bw.connSt[oldSC].addr = addrs[0]
+ bw.mu.Unlock()
+ oldSC.UpdateAddresses(newAddrs)
+ }
+ } else {
+ var (
+ add []resolver.Address // Addresses that need new connections.
+ del []balancer.SubConn // Connections that need to be torn down.
+ )
+ resAddrs := make(map[resolver.Address]Address)
+ for _, a := range addrs {
+ resAddrs[resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend, // All addresses from the balancer are backends.
+ ServerName: "",
+ Metadata: a.Metadata,
+ }] = a
+ }
+ bw.mu.Lock()
+ for a := range resAddrs {
+ if _, ok := bw.conns[a]; !ok {
+ add = append(add, a)
+ }
+ }
+ for a, c := range bw.conns {
+ if _, ok := resAddrs[a]; !ok {
+ del = append(del, c)
+ delete(bw.conns, a)
+ // Keep the state of this sc in bw.connSt until its state becomes Shutdown.
+ }
+ }
+ bw.mu.Unlock()
+ for _, a := range add {
+ sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+ } else {
+ bw.mu.Lock()
+ bw.conns[a] = sc
+ bw.connSt[sc] = &scState{
+ addr: resAddrs[a],
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ }
+ for _, c := range del {
+ bw.cc.RemoveSubConn(c)
+ }
+ }
+ }
+}
+
+func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ scSt, ok := bw.connSt[sc]
+ if !ok {
+ return
+ }
+ if s == connectivity.Idle {
+ sc.Connect()
+ }
+ oldS := scSt.s
+ scSt.s = s
+ if oldS != connectivity.Ready && s == connectivity.Ready {
+ scSt.down = bw.balancer.Up(scSt.addr)
+ } else if oldS == connectivity.Ready && s != connectivity.Ready {
+ if scSt.down != nil {
+ scSt.down(errConnClosing)
+ }
+ }
+ sa := bw.csEvltr.RecordTransition(oldS, s)
+ if bw.state != sa {
+ bw.state = sa
+ }
+ bw.cc.UpdateBalancerState(bw.state, bw)
+ if s == connectivity.Shutdown {
+ // Remove state for this sc.
+ delete(bw.connSt, sc)
+ }
+}
+
+func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ select {
+ case <-bw.startCh:
+ default:
+ close(bw.startCh)
+ }
+ // There should be a resolver inside the balancer.
+ // All updates here, if any, are ignored.
+}
+
+func (bw *balancerWrapper) Close() {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ select {
+ case <-bw.startCh:
+ default:
+ close(bw.startCh)
+ }
+ bw.balancer.Close()
+}
+
+// The picker is the balancerWrapper itself.
+// Pick should never return ErrNoSubConnAvailable.
+// It either blocks or returns an error, consistent with v1 balancer Get().
+func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ failfast := true // Default failfast is true.
+ if ss, ok := rpcInfoFromContext(ctx); ok {
+ failfast = ss.failfast
+ }
+ a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+ if err != nil {
+ return nil, nil, err
+ }
+ var done func(balancer.DoneInfo)
+ if p != nil {
+ done = func(i balancer.DoneInfo) { p() }
+ }
+ var sc balancer.SubConn
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ if bw.pickfirst {
+ // Get the first sc in conns.
+ for _, sc = range bw.conns {
+ break
+ }
+ } else {
+ var ok bool
+ sc, ok = bw.conns[resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend,
+ ServerName: "",
+ Metadata: a.Metadata,
+ }]
+ if !ok && failfast {
+ return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+ }
+ if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
+ // If the returned sc is not ready and the RPC is failfast,
+ // return an error, and this RPC will fail.
+ return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+ }
+ }
+
+ return sc, done, nil
+}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
new file mode 100644
index 000000000..180d79d06
--- /dev/null
+++ b/vendor/google.golang.org/grpc/call.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "golang.org/x/net/context"
+)
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+ // Allow the interceptor to see all applicable call options, which means
+ // those configured as defaults from dial options as well as per-call
+ // options.
+ opts = combine(cc.dopts.callOptions, opts)
+
+ if cc.dopts.unaryInt != nil {
+ return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+ }
+ return invoke(ctx, method, args, reply, cc, opts...)
+}
+
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+ // We don't use append because o1 could have extra capacity whose
+ // elements would be overwritten, which could cause inadvertent
+ // sharing (and race conditions) between concurrent calls.
+ if len(o1) == 0 {
+ return o2
+ } else if len(o2) == 0 {
+ return o1
+ }
+ ret := make([]CallOption, len(o1)+len(o2))
+ copy(ret, o1)
+ copy(ret[len(o1):], o2)
+ return ret
+}
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// DEPRECATED: Use ClientConn.Invoke instead.
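+//
+// For example, the following calls are equivalent (method, req, resp and cc
+// are placeholders, not part of this file):
+//
+//	err := grpc.Invoke(ctx, "/pkg.Service/Method", req, resp, cc) // deprecated form
+//	err := cc.Invoke(ctx, "/pkg.Service/Method", req, resp)       // preferred form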
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..318ac4073 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1277 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/status" +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // We use an accessor so that minConnectTimeout can be + // atomically read and updated while testing. + getMinConnectTimeout = func() time.Duration { + return minConnectTimeout + } +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. 
Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
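+//
+// An illustrative blocking dial (the target and options here are examples,
+// not requirements):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	cc, err := grpc.DialContext(ctx, "dns:///my-service:443",
+//		grpc.WithInsecure(), grpc.WithBlock())
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	defer cc.Close()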
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + } + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return dialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.Exponential{ + MaxDelay: DefaultBackoffConfig.MaxDelay, + } + } + if cc.dopts.resolverBuilder == nil { + // Only try to parse target when resolver builder is not already set. + cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + if cc.dopts.resolverBuilder == nil { + // If resolver builder is still nil, the parse target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original unparsed target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + } + } else { + cc.parsedTarget = resolver.Target{Endpoint: target} + } + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. 
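+ // (scChan is the service-config channel supplied via a dial option; this
+ // blocking path runs only when such a channel was set but no config had
+ // arrived during the non-blocking attempt above.)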
+ select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + } + + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + // Start the resolver wrapper goroutine after resolverWrapper is created. + // + // If the goroutine is started before resolverWrapper is ready, the + // following may happen: The goroutine sends updates to cc. cc forwards + // those to balancer. Balancer creates new addrConn. addrConn fails to + // connect, and calls resolveNow(). resolveNow() tries to use the non-ready + // resolverWrapper. + cc.resolverWrapper.start() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface{ Temporary() bool }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConn represents a client connection to an RPC server. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + resolverWrapper *ccResolverWrapper + blockingpicker *pickerWrapper + + mu sync.RWMutex + sc ServiceConfig + scRaw string + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + preBalancerName string // previous balancer name. 
+ curAddresses []resolver.Address
+ balancerWrapper *ccBalancerWrapper
+ retryThrottler atomic.Value
+
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+}
+
+// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
+// ctx expires. A true value is returned in the former case and false in the latter.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+ ch := cc.csMgr.getNotifyChan()
+ if cc.csMgr.getState() != sourceState {
+ return true
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-ch:
+ return true
+ }
+}
+
+// GetState returns the connectivity.State of ClientConn.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) GetState() connectivity.State {
+ return cc.csMgr.getState()
+}
+
+func (cc *ClientConn) scWatcher() {
+ for {
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if !ok {
+ return
+ }
+ cc.mu.Lock()
+ // TODO: load balance policy runtime change is ignored.
+ // We may revisit this decision in the future.
+ cc.sc = sc
+ cc.scRaw = ""
+ cc.mu.Unlock()
+ case <-cc.ctx.Done():
+ return
+ }
+ }
+}
+
+func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.conns == nil {
+ // cc was closed.
+ return
+ }
+
+ if reflect.DeepEqual(cc.curAddresses, addrs) {
+ return
+ }
+
+ cc.curAddresses = addrs
+
+ if cc.dopts.balancerBuilder == nil {
+ // Only look at balancer types and switch balancer if balancer dial
+ // option is not set.
+ var isGRPCLB bool
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ isGRPCLB = true
+ break
+ }
+ }
+ var newBalancerName string
+ if isGRPCLB {
+ newBalancerName = grpclbName
+ } else {
+ // Address list doesn't contain grpclb address. Try to pick a
+ // non-grpclb balancer.
+ newBalancerName = cc.curBalancerName
+ // If current balancer is grpclb, switch to the previous one.
+ if newBalancerName == grpclbName {
+ newBalancerName = cc.preBalancerName
+ }
+ // The following could be true in two cases:
+ // - the first time handling resolved addresses
+ // (curBalancerName="")
+ // - the first time handling non-grpclb addresses
+ // (curBalancerName="grpclb", preBalancerName="")
+ if newBalancerName == "" {
+ newBalancerName = PickFirstBalancerName
+ }
+ }
+ cc.switchBalancer(newBalancerName)
+ } else if cc.balancerWrapper == nil {
+ // Balancer dial option was set, and this is the first time handling
+ // resolved addresses. Build a balancer with dopts.balancerBuilder.
+ cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
+ }
+
+ cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+}
+
+// switchBalancer starts the switch from the current balancer to the balancer
+// with the given name.
+//
+// It will NOT send the current address list to the new balancer. If needed,
+// the caller of this function should send the address list to the new
+// balancer after this function returns.
+//
+// Caller must hold cc.mu.
+func (cc *ClientConn) switchBalancer(name string) {
+ if cc.conns == nil {
+ return
+ }
+
+ if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
+ return
+ }
+
+ grpclog.Infof("ClientConn switching balancer to %q", name)
+ if cc.dopts.balancerBuilder != nil {
+ grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
+ return
+ }
+ // TODO(bar switching) change this to two steps: drain and close.
+ // Keep track of sc in wrapper.
+ if cc.balancerWrapper != nil {
+ cc.balancerWrapper.close()
+ }
+
+ builder := balancer.Get(name)
+ if builder == nil {
+ grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
+ builder = newPickfirstBuilder()
+ }
+ cc.preBalancerName = cc.curBalancerName
+ cc.curBalancerName = builder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+}
+
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ // TODO(bar switching) send updates to all balancer wrappers when balancer
+ // gracefully switching is supported.
+ cc.balancerWrapper.handleSubConnStateChange(sc, s)
+ cc.mu.Unlock()
+}
+
+// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
+ ac := &addrConn{
+ cc: cc,
+ addrs: addrs,
+ dopts: cc.dopts,
+ czData: new(channelzData),
+ resetBackoff: make(chan struct{}),
+ }
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return nil, ErrClientConnClosing
+ }
+ if channelz.IsOn() {
+ ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
+ }
+ cc.conns[ac] = struct{}{}
+ cc.mu.Unlock()
+ return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ delete(cc.conns, ac)
+ cc.mu.Unlock()
+ ac.tearDown(err)
+}
+
+func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
+ return &channelz.ChannelInternalMetric{
+ State: cc.GetState(),
+ Target: cc.target,
+ CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
+ CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
+ CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
+ LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
+ }
+}
+
+// Target returns the target string of the ClientConn.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) Target() string {
+ return cc.target
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+ atomic.AddInt64(&cc.czData.callsStarted, 1)
+ atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+ atomic.AddInt64(&cc.czData.callsSucceeded, 1)
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+ atomic.AddInt64(&cc.czData.callsFailed, 1)
+}
+
+// connect starts creating the transport and also starts the transport monitor
+// goroutine for this ac.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+// This was part of resetAddrConn, keep it here to make the diff look clean.
+func (ac *addrConn) connect() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.state != connectivity.Idle {
+ ac.mu.Unlock()
+ return nil
+ }
+ ac.state = connectivity.Connecting
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.mu.Unlock()
+
+ // Start a goroutine connecting to the server asynchronously.
+ go func() {
+ if err := ac.resetTransport(); err != nil {
+ grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ ac.transportMonitor()
+ }()
+ return nil
+}
+
+// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
+//
+// It checks whether the currently connected address of ac is in the new addrs list.
+// - If true, it updates ac.addrs and returns true. The ac will keep using
+// the existing connection.
+// - If false, it does nothing and returns false.
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ if ac.state == connectivity.Shutdown {
+ ac.addrs = addrs
+ return true
+ }
+
+ var curAddrFound bool
+ for _, a := range addrs {
+ if reflect.DeepEqual(ac.curAddr, a) {
+ curAddrFound = true
+ break
+ }
+ }
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+ if curAddrFound {
+ ac.addrs = addrs
+ ac.reconnectIdx = 0 // Start reconnecting from the beginning of the new list.
+ }
+
+ return curAddrFound
+}
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for the input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the default config
+// under the service (i.e. /service/). If there is a default MethodConfig for
+// the service, we return it.
+// Otherwise, we return an empty MethodConfig.
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+ // TODO: Avoid the locking here.
+ cc.mu.RLock()
+ defer cc.mu.RUnlock()
+ m, ok := cc.sc.Methods[method]
+ if !ok {
+ i := strings.LastIndex(method, "/")
+ m = cc.sc.Methods[method[:i+1]]
+ }
+ return m
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
+ FullMethodName: method,
+ })
+ if err != nil {
+ return nil, nil, toRPCErr(err)
+ }
+ return t, done, nil
+}
+
+// handleServiceConfig parses the service config string in JSON format into the Go
+// native struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
+func (cc *ClientConn) handleServiceConfig(js string) error {
+ if cc.dopts.disableServiceConfig {
+ return nil
+ }
+ sc, err := parseServiceConfig(js)
+ if err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ cc.scRaw = js
+ cc.sc = sc
+
+ if sc.retryThrottling != nil {
+ newThrottler := &retryThrottler{
+ tokens: sc.retryThrottling.MaxTokens,
+ max: sc.retryThrottling.MaxTokens,
+ thresh: sc.retryThrottling.MaxTokens / 2,
+ ratio: sc.retryThrottling.TokenRatio,
+ }
+ cc.retryThrottler.Store(newThrottler)
+ } else {
+ cc.retryThrottler.Store((*retryThrottler)(nil))
+ }
+
+ if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
+ if cc.curBalancerName == grpclbName {
+ // If the current balancer is grpclb, there's at least one grpclb
+ // balancer address in the resolved list. Don't switch the balancer,
+ // but change the previous balancer name, so if a new resolved
+ // address list doesn't contain a grpclb address, the balancer will be
+ // switched to *sc.LB.
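+ // For illustration, a service config that selects a balancer and
+ // enables retry throttling looks roughly like this (values are
+ // placeholders):
+ //
+ //	{
+ //	  "loadBalancingPolicy": "round_robin",
+ //	  "retryThrottling": {"maxTokens": 10, "tokenRatio": 0.1}
+ //	}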
+ cc.preBalancerName = *sc.LB + } else { + cc.switchBalancer(*sc.LB) + cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) + } + } + + cc.mu.Unlock() + return nil +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + defer cc.mu.Unlock() + for ac := range cc.conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn + + mu sync.Mutex + curAddr resolver.Address + reconnectIdx int // The index in addrs list to start reconnecting from. + state connectivity.State + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error + + connectRetryNum int + // backoffDeadline is the time until which resetTransport needs to + // wait before increasing connectRetryNum count. + backoffDeadline time.Time + // connectDeadline is the time by which all connection + // negotiations must complete. + connectDeadline time.Time + + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) + } +} + +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. 
+// The created transport must receive the initial settings frame from the server.
+// In case that doesn't happen, transportMonitor will kill the newly created
+// transport after connectDeadline has expired.
+// In case there was an error on the transport before the settings frame was
+// received, resetTransport resumes connecting to backends after the one that
+// was previously connected to. In case the end of the list is reached,
+// resetTransport backs off until the original deadline.
+// If the DialOption WithWaitForHandshake was set, resetTransport returns
+// successfully only after server settings are received.
+//
+// TODO(bar) make sure all state transitions are valid.
+func (ac *addrConn) resetTransport() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.transport = nil
+ ridx := ac.reconnectIdx
+ ac.mu.Unlock()
+ ac.cc.mu.RLock()
+ ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.cc.mu.RUnlock()
+ var backoffDeadline, connectDeadline time.Time
+ var resetBackoff chan struct{}
+ for connectRetryNum := 0; ; connectRetryNum++ {
+ ac.mu.Lock()
+ if ac.backoffDeadline.IsZero() {
+ // This means either a successful HTTP2 connection was established
+ // or this is the first time this addrConn is trying to establish a
+ // connection.
+ backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration.
+ resetBackoff = ac.resetBackoff
+ // This will be the duration that dial gets to finish.
+ dialDuration := getMinConnectTimeout()
+ if backoffFor > dialDuration {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ start := time.Now()
+ backoffDeadline = start.Add(backoffFor)
+ connectDeadline = start.Add(dialDuration)
+ ridx = 0 // Start connecting from the beginning.
+ } else {
+ // Continue trying to connect with the same deadlines.
+ connectRetryNum = ac.connectRetryNum
+ backoffDeadline = ac.backoffDeadline
+ connectDeadline = ac.connectDeadline
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
+ }
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ ac.printf("connecting")
+ if ac.state != connectivity.Connecting {
+ ac.state = connectivity.Connecting
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ }
+ // Copy ac.addrs in case of a race.
+ addrsIter := make([]resolver.Address, len(ac.addrs))
+ copy(addrsIter, ac.addrs)
+ copts := ac.dopts.copts
+ ac.mu.Unlock()
+ connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts, resetBackoff)
+ if err != nil {
+ return err
+ }
+ if connected {
+ return nil
+ }
+ }
+}
+
+// createTransport creates a connection to one of the backends in addrs.
+// It returns true if a connection was established.
+func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions, resetBackoff chan struct{}) (bool, error) {
+ for i := ridx; i < len(addrs); i++ {
+ addr := addrs[i]
+ target := transport.TargetInfo{
+ Addr: addr.Addr,
+ Metadata: addr.Metadata,
+ Authority: ac.cc.authority,
+ }
+ done := make(chan struct{})
+ onPrefaceReceipt := func() {
+ ac.mu.Lock()
+ close(done)
+ if !ac.backoffDeadline.IsZero() {
+ // If we haven't already started reconnecting to
+ // other backends.
+ // Note, this can happen when the writer notices an error
+ // and triggers resetTransport while at the same time the
+ // reader receives the preface and invokes this closure.
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
+ }
+ ac.mu.Unlock()
+ }
+ // Do not cancel in the success path because of
+ // this issue in Go1.6: https://github.com/golang/go/issues/15078.
+ connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ if channelz.IsOn() {
+ copts.ChannelzParentID = ac.channelzID
+ }
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
+ if err != nil {
+ cancel()
+ ac.cc.blockingpicker.updateConnectionError(err)
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return false, errConnClosing
+ }
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
+ continue
+ }
+ if ac.dopts.waitForHandshake {
+ select {
+ case <-done:
+ case <-connectCtx.Done():
+ // Didn't receive the server preface, must kill this new transport now.
+ grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
+ newTr.Close()
+ continue
+ case <-ac.ctx.Done():
+ }
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ // ac.tearDown(...) has been invoked.
+ newTr.Close()
+ return false, errConnClosing
+ }
+ ac.printf("ready")
+ ac.state = connectivity.Ready
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.transport = newTr
+ ac.curAddr = addr
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ select {
+ case <-done:
+ // If the server has already responded with the preface,
+ // don't set the reconnect parameters.
+ default:
+ ac.connectRetryNum = connectRetryNum
+ ac.backoffDeadline = backoffDeadline
+ ac.connectDeadline = connectDeadline
+ ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
+ }
+ ac.mu.Unlock()
+ return true, nil
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return false, errConnClosing
+ }
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.cc.resolveNow(resolver.ResolveNowOption{})
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.mu.Unlock()
+ timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
+ select {
+ case <-timer.C:
+ case <-resetBackoff:
+ timer.Stop()
+ case <-ac.ctx.Done():
+ timer.Stop()
+ return false, ac.ctx.Err()
+ }
+ return false, nil
+}
+
+func (ac *addrConn) resetConnectBackoff() {
+ ac.mu.Lock()
+ close(ac.resetBackoff)
+ ac.resetBackoff = make(chan struct{})
+ ac.connectRetryNum = 0
+ ac.mu.Unlock()
+}
+
+// transportMonitor runs in a goroutine to track errors on the transport. It
+// creates a new transport if an error happens. It returns when the channel
+// is closing.
+func (ac *addrConn) transportMonitor() {
+ for {
+ var timer *time.Timer
+ var cdeadline <-chan time.Time
+ ac.mu.Lock()
+ t := ac.transport
+ if !ac.connectDeadline.IsZero() {
+ timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
+ cdeadline = timer.C
+ }
+ ac.mu.Unlock()
+ // Block until we receive a goaway or an error occurs.
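+ // Three events can wake the select below: a GoAway from the server,
+ // a transport error, or the connect deadline firing before the server
+ // preface arrived (cdeadline).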
+ select {
+ case <-t.GoAway():
+ done := t.Error()
+ cleanup := t.Close
+ // Since this transport will be orphaned (won't have a transportMonitor)
+ // we need to launch a goroutine to keep track of clientConn.Close()
+ // happening since it might not be noticed by any other goroutine for a while.
+ go func() {
+ <-done
+ cleanup()
+ }()
+ case <-t.Error():
+ // In case this is triggered because clientConn.Close()
+ // was called, we want to immediately close the transport
+ // since no other goroutine might notice it for a while.
+ t.Close()
+ case <-cdeadline:
+ ac.mu.Lock()
+ // This implies that the client received the server preface.
+ if ac.backoffDeadline.IsZero() {
+ ac.mu.Unlock()
+ continue
+ }
+ ac.mu.Unlock()
+ timer = nil
+ // No server preface received until deadline.
+ // Kill the connection.
+ grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
+ t.Close()
+ }
+ if timer != nil {
+ timer.Stop()
+ }
+ // If a GoAway happened, regardless of error, adjust our keepalive
+ // parameters as appropriate.
+ select {
+ case <-t.GoAway():
+ ac.adjustParams(t.GetGoAwayReason())
+ default:
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ // Set connectivity state to TransientFailure before calling
+ // resetTransport. Transition READY->CONNECTING is not valid.
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.cc.resolveNow(resolver.ResolveNowOption{})
+ ac.curAddr = resolver.Address{}
+ ac.mu.Unlock()
+ if err := ac.resetTransport(); err != nil {
+ ac.mu.Lock()
+ ac.printf("transport exiting: %v", err)
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ }
+}
+
+// getReadyTransport returns the transport if ac's state is READY.
+// Otherwise it returns nil, false.
+// If ac's state is IDLE, it will trigger ac to connect.
+func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+ ac.mu.Lock()
+ if ac.state == connectivity.Ready {
+ t := ac.transport
+ ac.mu.Unlock()
+ return t, true
+ }
+ var idle bool
+ if ac.state == connectivity.Idle {
+ idle = true
+ }
+ ac.mu.Unlock()
+ // Trigger an idle ac to connect.
+ if idle {
+ ac.connect()
+ }
+ return nil, false
+}
+
+// tearDown starts to tear down the addrConn.
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many addrConn's in a
+// tight loop).
+// tearDown doesn't remove ac from ac.cc.conns.
+func (ac *addrConn) tearDown(err error) {
+ ac.cancel()
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ if ac.state == connectivity.Shutdown {
+ return
+ }
+ ac.curAddr = resolver.Address{}
+ if err == errConnDrain && ac.transport != nil {
+ // GracefulClose(...) may be executed multiple times when
+ // i) receiving multiple GoAway frames from the server; or
+ // ii) there are concurrent name resolver/Balancer triggered
+ // address removal and GoAway.
+ ac.transport.GracefulClose() + } + ac.state = connectivity.Shutdown + ac.tearDownErr = err + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + if channelz.IsOn() { + channelz.RemoveEntry(ac.channelzID) + } +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 000000000..129776547 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..d9b9d5782 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. 
E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. 
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..568ef5dc6 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..1dae57ab1 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 000000000..1dae57ab1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,293 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials implements various credentials supported by the gRPC
+// library, which encapsulate all the state needed by a client to authenticate
+// with a server and make various assertions, e.g., about the client's identity,
+// role, or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+)
+
+// alpnProtoStr are the specified application level protocols for gRPC.
+var alpnProtoStr = []string{"h2"}
+
+// PerRPCCredentials defines the common interface for the credentials which need to
+// attach security information to every RPC (e.g., oauth2).
+type PerRPCCredentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing
+	// tokens if required. This should be called by the transport layer on
+	// each request, and the data should be populated in headers or other
+	// context. If a status code is returned, it will be used as the status
+	// for the RPC. uri is the URI of the entry point for the request.
+	// When supported by the underlying implementation, ctx can be used for
+	// timeout and cancellation.
+	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
+	// it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials require
+	// transport security.
+	RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, server name, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.
+	SecurityVersion string
+	// ServerName is the user-configured server name.
+	ServerName string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+type AuthInfo interface {
+	AuthType() string
+}
+
+// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+// and the caller should not close rawConn.
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+
+// TransportCredentials defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportCredentials interface {
+	// ClientHandshake does the authentication handshake specified by the corresponding
+	// authentication protocol on rawConn for clients. It returns the authenticated
+	// connection and the corresponding auth information about the connection.
+	// Implementations must use the provided context to implement timely cancellation.
+ // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetChannelzSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. 
+type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(authority, ":") + if colonPos == -1 { + colonPos = len(authority) + } + cfg.ServerName = authority[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +type TLSChannelzSecurityValue struct { + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +type OtherChannelzSecurityValue struct { + Name string + Value proto.Message +} + +func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {} + +type tlsConn struct { + *tls.Conn + rawConn net.Conn +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", +} diff --git a/vendor/google.golang.org/grpc/credentials/go16.go b/vendor/google.golang.org/grpc/credentials/go16.go new file mode 100644 index 000000000..d6bbcc9fd --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go16.go @@ -0,0 +1,57 @@ +// +build !go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
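
A short sketch of how the client- and server-side constructors above are
typically wired up; the PEM paths and target are hypothetical:

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials"
    )

    func main() {
    	// Client side: trust the CA bundle in ca.pem.
    	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
    	if err != nil {
    		log.Fatalf("loading CA certificate: %v", err)
    	}
    	conn, err := grpc.Dial("greeter.example.com:443", grpc.WithTransportCredentials(creds))
    	if err != nil {
    		log.Fatalf("dial: %v", err)
    	}
    	defer conn.Close()

    	// Server side: present the certificate/key pair to clients.
    	serverCreds, err := credentials.NewServerTLSFromFile("server.pem", "server.key")
    	if err != nil {
    		log.Fatalf("loading server key pair: %v", err)
    	}
    	_ = grpc.NewServer(grpc.Creds(serverCreds))
    }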
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/go17.go b/vendor/google.golang.org/grpc/credentials/go17.go new file mode 100644 index 000000000..fbd500002 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go17.go @@ -0,0 +1,59 @@ +// +build go1.7,!go1.8 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/go18.go b/vendor/google.golang.org/grpc/credentials/go18.go new file mode 100644 index 000000000..db30d46cc --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go18.go @@ -0,0 +1,46 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +func init() { + cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305" +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/go19.go b/vendor/google.golang.org/grpc/credentials/go19.go new file mode 100644 index 000000000..2a4ca1a57 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go19.go @@ -0,0 +1,35 @@ +// +build go1.9,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
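
The Go 1.8+ variant above can simply delegate to tls.Config.Clone, which is
safe to call even on a Config already in use; the hand-maintained field lists
in the older build-tagged files exist only because Clone predates Go 1.8. For
illustration:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    )

    func main() {
    	base := &tls.Config{ServerName: "example.com"}
    	// Clone (Go 1.8+) yields an independent copy; mutating it does not
    	// affect the original, unlike sharing the same *tls.Config.
    	perConn := base.Clone()
    	perConn.ServerName = "override.example.com"
    	fmt.Println(base.ServerName, perConn.ServerName)
    }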
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "errors" + "syscall" +) + +// implements the syscall.Conn interface +func (c tlsConn) SyscallConn() (syscall.RawConn, error) { + conn, ok := c.rawConn.(syscall.Conn) + if !ok { + return nil, errors.New("RawConn does not implement syscall.Conn") + } + return conn.SyscallConn() +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go new file mode 100644 index 000000000..f6d597a14 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package oauth implements gRPC credentials using OAuth. +package oauth + +import ( + "fmt" + "io/ioutil" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/grpc/credentials" +) + +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. +type TokenSource struct { + oauth2.TokenSource +} + +// GetRequestMetadata gets the request metadata as a map from a TokenSource. +func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +// RequireTransportSecurity indicates whether the credentials requires transport security. +func (ts TokenSource) RequireTransportSecurity() bool { + return true +} + +type jwtAccess struct { + jsonKey []byte +} + +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewJWTAccessFromKey(jsonKey) +} + +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { + return jwtAccess{jsonKey}, nil +} + +func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + if err != nil { + return nil, err + } + token, err := ts.Token() + if err != nil { + return nil, err + } + return map[string]string{ + "authorization": token.Type() + " " + token.AccessToken, + }, nil +} + +func (j jwtAccess) RequireTransportSecurity() bool { + return true +} + +// oauthAccess supplies PerRPCCredentials from a given token. +type oauthAccess struct { + token oauth2.Token +} + +// NewOauthAccess constructs the PerRPCCredentials using a given token. 
+func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { + return oauthAccess{token: *token} +} + +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return map[string]string{ + "authorization": oa.token.Type() + " " + oa.token.AccessToken, + }, nil +} + +func (oa oauthAccess) RequireTransportSecurity() bool { + return true +} + +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from +// Google Compute Engine (GCE)'s metadata server. It is only valid to use this +// if your program is running on a GCE instance. +// TODO(dsymonds): Deprecate and remove this. +func NewComputeEngine() credentials.PerRPCCredentials { + return TokenSource{google.ComputeTokenSource("")} +} + +// serviceAccount represents PerRPCCredentials via JWT signing key. +type serviceAccount struct { + mu sync.Mutex + config *jwt.Config + t *oauth2.Token +} + +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if !s.t.Valid() { + var err error + s.t, err = s.config.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + } + return map[string]string{ + "authorization": s.t.Type() + " " + s.t.AccessToken, + }, nil +} + +func (s *serviceAccount) RequireTransportSecurity() bool { + return true +} + +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice +// from a Google Developers service account. +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { + config, err := google.JWTConfigFromJSON(jsonKey, scope...) + if err != nil { + return nil, err + } + return &serviceAccount{config: config}, nil +} + +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file +// of a Google Developers service account. +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { + jsonKey, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) + } + return NewServiceAccountFromKey(jsonKey, scope...) +} + +// NewApplicationDefault returns "Application Default Credentials". For more +// detail, see https://developers.google.com/accounts/docs/application-default-credentials. +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { + t, err := google.DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return TokenSource{t}, nil +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..3d3c9e231 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,453 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
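
Per-RPC credentials such as the ones above report RequireTransportSecurity()
== true, so they have to be paired with transport credentials when dialing. A
sketch with hypothetical key, CA, and target values:

    package main

    import (
    	"log"
    	"time"

    	"golang.org/x/net/context"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials"
    	"google.golang.org/grpc/credentials/oauth"
    )

    func main() {
    	rpcCreds, err := oauth.NewServiceAccountFromFile("key.json",
    		"https://www.googleapis.com/auth/cloud-platform")
    	if err != nil {
    		log.Fatalf("reading service account key: %v", err)
    	}
    	tlsCreds, err := credentials.NewClientTLSFromFile("ca.pem", "")
    	if err != nil {
    		log.Fatalf("loading CA certificate: %v", err)
    	}

    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()
    	conn, err := grpc.DialContext(ctx, "service.example.com:443",
    		grpc.WithTransportCredentials(tlsCreds),
    		grpc.WithPerRPCCredentials(rpcCreds), // token attached to every RPC
    		grpc.WithBlock(),                     // fail if the dial cannot complete in time
    	)
    	if err != nil {
    		log.Fatalf("dial: %v", err)
    	}
    	defer conn.Close()
    }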
+ * + */ + +package grpc + +import ( + "fmt" + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + cp Compressor + dc Decompressor + bs backoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + waitForHandshake bool + channelzParentID int64 + disableServiceConfig bool + disableRetry bool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWaitForHandshake blocks until the initial settings frame is received from +// the server before assigning RPCs to the connection. Experimental API. +func WithWaitForHandshake() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.waitForHandshake = true + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. 
+func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// This is an EXPERIMENTAL API. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// withResolverBuilder is only for grpclb. 
+func withResolverBuilder(b resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolverBuilder = b
+	})
+}
+
+// WithServiceConfig returns a DialOption which has a channel to read the
+// service configuration.
+//
+// Deprecated: service config should be received through name resolver, as
+// specified here.
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.scChan = c
+	})
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
+// for use.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	return withBackoff(backoff.Exponential{
+		MaxDelay: b.MaxDelay,
+	})
+}
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoff.Strategy) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = bs
+	})
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until
+// the underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+	})
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Note that transport security is required unless WithInsecure is
+// set.
+func WithInsecure() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.insecure = true
+	})
+}
+
+// WithTransportCredentials returns a DialOption which configures connection-level
+// security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext and context.WithTimeout instead.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
+
+func init() {
+	internal.WithContextDialer = withContextDialer
+	internal.WithResolverBuilder = withResolverBuilder
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses.
If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return withContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. 
This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// This API is EXPERIMENTAL. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + disableRetry: !envconfig.Retry, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + }, + } +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..187adbb11 --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..ade8b7cec --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. 
If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + contentSubtype := strings.ToLower(codec.Name()) + if contentSubtype == "" { + panic("cannot register Codec with empty string result for String()") + } + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
+func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 000000000..66b97a6f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 000000000..b1db21af6 --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,71 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
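
RegisterCodec above is the hook for plugging in codecs other than the default
proto one. A sketch of a hypothetical JSON codec; clients could then select it
per call with the CallContentSubtype call option:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"google.golang.org/grpc/encoding"
    )

    // jsonCodec satisfies encoding.Codec by delegating to encoding/json.
    type jsonCodec struct{}

    func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                               { return "json" }

    func init() {
    	// Registration must happen at init time; RegisterCodec is not thread-safe.
    	encoding.RegisterCodec(jsonCodec{})
    }

    func main() {
    	fmt.Println(encoding.GetCodec("json").Name()) // json
    }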
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 000000000..71a72e8fe --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,72 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. 
+func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
+	if err == io.ErrUnexpectedEOF {
+		return status.Error(codes.Internal, err.Error())
+	}
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	default:
+		switch err {
+		case context.DeadlineExceeded, netctx.DeadlineExceeded:
+			return status.Error(codes.DeadlineExceeded, err.Error())
+		case context.Canceled, netctx.Canceled:
+			return status.Error(codes.Canceled, err.Error())
+		}
+	}
+	return status.Error(codes.Unknown, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
new file mode 100644
index 000000000..1fabb11e1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -0,0 +1,126 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in the transport package only go to verbose level 2.
+// All logs in other packages in grpc are logged regardless of the verbosity level.
+//
+// In the default logger, the severity level can be set by the environment
+// variable GRPC_GO_LOG_SEVERITY_LEVEL, and the verbosity level by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import "os"
+
+var logger = newLoggerV2()
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+	return logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+	logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+	logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+	logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+	logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+	logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+	logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+	logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+	logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+	logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+	logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+	logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+	logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+	logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 000000000..097494f71
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fatalln(args ...interface{})
+	Print(args ...interface{})
+	Printf(format string, args ...interface{})
+	Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...interface{}) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...interface{}) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 000000000..d49325776
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,195 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strconv"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...interface{})
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...interface{})
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
+// It is not mutex-protected and should be called before any gRPC functions.
+func SetLoggerV2(l LoggerV2) {
+	logger = l
+}
+
+const (
+	// infoLog indicates Info severity.
+	infoLog int = iota
+	// warningLog indicates Warning severity.
+	warningLog
+	// errorLog indicates Error severity.
+	errorLog
+	// fatalLog indicates Fatal severity.
+	fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+	m []*log.Logger
+	v int
+}
+
+// NewLoggerV2 creates a loggerV2 with the provided writers.
+// Fatal logs will be written to errorW, warningW and infoW, followed by exit(1).
+// Error logs will be written to errorW, warningW and infoW.
+// Warning logs will be written to warningW and infoW.
+// Info logs will be written to infoW.
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
+	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+}
+
+// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
+// verbosity level.
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+	var m []*log.Logger
+	m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
+	m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
+	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+	m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
+	m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
+	return &loggerT{m: m, v: v}
+}
+
+// newLoggerV2 creates a loggerV2 to be used as the default logger.
+// Logs are written to stderr; which severities are emitted is controlled by
+// the GRPC_GO_LOG_SEVERITY_LEVEL environment variable (ERROR if unset).
+func newLoggerV2() LoggerV2 {
+	errorW := ioutil.Discard
+	warningW := ioutil.Discard
+	infoW := ioutil.Discard
+
+	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
+	switch logLevel {
+	case "", "ERROR", "error": // If env is unset, set level to ERROR.
+		errorW = os.Stderr
+	case "WARNING", "warning":
+		warningW = os.Stderr
+	case "INFO", "info":
+		infoW = os.Stderr
+	}
+
+	var v int
+	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+	if vl, err := strconv.Atoi(vLevel); err == nil {
+		v = vl
+	}
+	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+}
+
+func (g *loggerT) Info(args ...interface{}) {
+	g.m[infoLog].Print(args...)
+}
+
+func (g *loggerT) Infoln(args ...interface{}) {
+	g.m[infoLog].Println(args...)
+}
+
+func (g *loggerT) Infof(format string, args ...interface{}) {
+	g.m[infoLog].Printf(format, args...)
+}
+
+func (g *loggerT) Warning(args ...interface{}) {
+	g.m[warningLog].Print(args...)
+}
+
+func (g *loggerT) Warningln(args ...interface{}) {
+	g.m[warningLog].Println(args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...interface{}) {
+	g.m[warningLog].Printf(format, args...)
+}
+
+func (g *loggerT) Error(args ...interface{}) {
+	g.m[errorLog].Print(args...)
+}
+
+func (g *loggerT) Errorln(args ...interface{}) {
+	g.m[errorLog].Println(args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...interface{}) {
+	g.m[errorLog].Printf(format, args...)
+}
+
+func (g *loggerT) Fatal(args ...interface{}) {
+	g.m[fatalLog].Fatal(args...)
+	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) Fatalln(args ...interface{}) {
+	g.m[fatalLog].Fatalln(args...)
+	// No need to call os.Exit() again because log.Logger.Fatalln() calls os.Exit().
+}
+
+func (g *loggerT) Fatalf(format string, args ...interface{}) {
+	g.m[fatalLog].Fatalf(format, args...)
+	// No need to call os.Exit() again because log.Logger.Fatalf() calls os.Exit().
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 000000000..1f6ef6780
--- /dev/null
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"golang.org/x/net/context"
+)
+
+// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
+type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
+// and it is the responsibility of the interceptor to call it.
+// This is an EXPERIMENTAL API.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+
+// Streamer is called by StreamClientInterceptor to create a ClientStream.
+type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
+
+// StreamClientInterceptor intercepts the creation of a ClientStream. It may return a custom ClientStream to intercept all I/O
+// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
+// This is an EXPERIMENTAL API.
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
+
+// UnaryServerInfo contains various information about a unary RPC on the
+// server side. All per-rpc information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+	// Server is the service implementation the user provides. This is read-only.
+	Server interface{}
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
+// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
+// the status message of the RPC.
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information of this RPC the interceptor can operate on. handler is the wrapper
+// of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
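+//
+// For illustration only, a minimal logging interceptor sketch (the method-name
+// logging shown here is a placeholder pattern, not a prescribed one):
+//
+//	func loggingInterceptor(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
+//		grpclog.Infof("unary call: %s", info.FullMethod)
+//		resp, err := handler(ctx, req)
+//		if err != nil {
+//			grpclog.Warningf("unary call %s failed: %v", info.FullMethod, err)
+//		}
+//		return resp, err
+//	}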
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo contains various information about a streaming RPC on the
+// server side. All per-rpc information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 000000000..1bd0cce5a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+const (
+	// baseDelay is the amount of time to wait before retrying after the first
+	// failure.
+	baseDelay = 1.0 * time.Second
+	// factor is applied to the backoff after each retry.
+	factor = 1.6
+	// jitter provides a range to randomize backoff delays.
+	jitter = 0.2
+)
+
+// Exponential implements the exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
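+//
+// For illustration, approximate delays produced with the defaults above (each
+// value after the first is further randomized by up to +/-20% jitter):
+//
+//	b := Exponential{MaxDelay: 120 * time.Second}
+//	b.Backoff(0) // 1s exactly (no jitter on the first retry)
+//	b.Backoff(1) // ~1.6s
+//	b.Backoff(2) // ~2.56s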
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return baseDelay
+	}
+	backoff, max := float64(baseDelay), float64(bc.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= factor
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 000000000..586a0336b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,573 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling the channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage = 50
+	curState     int32
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		NewChannelzStorage()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// dbWrapper wraps around a reference to internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorage initializes channelz data storage and id generator.
+//
+// Note: This function is exported for testing purposes only. Users should not
+// call it in most cases.
+func NewChannelzStorage() {
+	db.set(&channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	})
+	idGen.reset()
+}
+
+// GetTopChannels returns a slice of top channels' ChannelMetric, along with a
+// boolean indicating whether there are more top channels to be queried for.
+//
+// The arg id specifies that only top channels with id at or above it will be
+// included in the result. The returned slice is up to a length of EntryPerPage,
+// and is sorted in ascending id order.
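+//
+// For illustration only, a paging sketch that walks all top channels (id is
+// advanced past the last entry seen on each iteration):
+//
+//	var id int64
+//	for {
+//		metrics, end := channelz.GetTopChannels(id)
+//		for _, m := range metrics {
+//			// inspect m ...
+//			id = m.ID + 1
+//		}
+//		if end {
+//			break
+//		}
+//	}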
+func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id)
+}
+
+// GetServers returns a slice of servers' ServerMetric, along with a
+// boolean indicating whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with id at or above it will be
+// included in the result. The returned slice is up to a length of EntryPerPage,
+// and is sorted in ascending id order.
+func GetServers(id int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id)
+}
+
+// GetServerSockets returns a slice of SocketMetric for the normal sockets of
+// the server (identified by id), along with a boolean indicating whether there
+// are more sockets to be queried for.
+//
+// The arg startID specifies that only sockets with id at or above it will be
+// included in the result. The returned slice is up to a length of EntryPerPage,
+// and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid = 0 means no parent. It returns the unique channelz
+// tracking id assigned to this channel.
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid, ref)
+	} else {
+		db.get().addChannel(id, cn, false, pid, ref)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+	}
+	db.get().addSubChannel(id, sc, pid, ref)
+	return id
+}
+
+// RegisterServer registers the given server s in the channelz database. It
+// returns the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this listen socket.
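+//
+// For illustration only (this is an internal, experimental API; s, ls and the
+// reference names below are placeholders):
+//
+//	serverID := channelz.RegisterServer(s, "myServer")
+//	lsID := channelz.RegisterListenSocket(ls, serverID, "myListener")
+//	defer channelz.RemoveEntry(lsID)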
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid, ref)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this normal socket.
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.Error("a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid, ref)
+	return id
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to locking.
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// Methods of the second type must always be called from within a method of the
+// first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+	c.mu.Lock()
+	cn.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+	c.mu.Lock()
+	sc.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry. The removal may not delete the
+// entry immediately, if it has to wait on the deletion of its children, and it
+// may cascade into a chain of entry deletions. For example, deleting the last
+// socket of a gracefully shutting down server will lead to the server being
+// deleted as well.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, the caller must check that this entry is ready to be deleted, i.e.,
+// removeEntry() has been called on it, and no children still exist.
+// Conditionals are ordered by the expected frequency of deletion of each entity
+// type, in order to optimize performance.
+func (c *channelMap) deleteEntry(id int64) {
+	var ok bool
+	if _, ok = c.normalSockets[id]; ok {
+		delete(c.normalSockets, id)
+		return
+	}
+	if _, ok = c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return
+	}
+	if _, ok = c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return
+	}
+	if _, ok = c.listenSockets[id]; ok {
+		delete(c.listenSockets, id)
+		return
+	}
+	if _, ok = c.servers[id]; ok {
+		delete(c.servers, id)
+		return
+	}
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
+	c.mu.RLock()
+	l := len(c.topLevelChannels)
+	ids := make([]int64, 0, l)
+	cns := make([]*channel, 0, min(l, EntryPerPage))
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := 0
+	var end bool
+	var t []*ChannelMetric
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			cns = append(cns, cn)
+			t = append(t, &ChannelMetric{
+				NestedChans: copyMap(cn.nestedChans),
+				SubChans:    copyMap(cn.subChans),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, cn := range cns {
+		t[i].ChannelData = cn.c.ChannelzMetric()
+		t[i].ID = cn.id
+		t[i].RefName = cn.refName
+	}
+	return t, end
+}
+
+func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
+	c.mu.RLock()
+	l := len(c.servers)
+	ids := make([]int64, 0, l)
+	ss := make([]*server, 0, min(l, EntryPerPage))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := 0
+	var end bool
+	var s []*ServerMetric
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			ss = append(ss, svr)
+			s = append(s, &ServerMetric{
+				ListenSockets: copyMap(svr.listenSockets),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, svr := range ss {
+		s[i].ServerData = svr.s.ChannelzMetric()
+		s[i].ID = svr.id
+		s[i].RefName = svr.refName
+	}
+	return s, end
+}
+
+func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		// server with id doesn't exist.
+		c.mu.RUnlock()
+		return nil, true
+	}
+	svrskts := svr.sockets
+	l := len(svrskts)
+	ids := make([]int64, 0, l)
+	sks := make([]*normalSocket, 0, min(l, EntryPerPage))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	count := 0
+	var end bool
+	for i, v := range ids[idx:] {
+		if count == EntryPerPage {
+			break
+		}
+		if ns, ok := c.normalSockets[v]; ok {
+			sks = append(sks, ns)
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+	var s []*SocketMetric
+	for _, ns := range sks {
+		sm := &SocketMetric{}
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		s = append(s, sm)
+	}
+	return s, end
+}
+
+func (c *channelMap) GetChannel(id int64) *ChannelMetric {
+	cm := &ChannelMetric{}
+	var cn *channel
+	var ok bool
+	c.mu.RLock()
+	if cn, ok = c.channels[id]; !ok {
+		// channel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.NestedChans = copyMap(cn.nestedChans)
+	cm.SubChans = copyMap(cn.subChans)
+	c.mu.RUnlock()
+	cm.ChannelData = cn.c.ChannelzMetric()
+	cm.ID = cn.id
+	cm.RefName = cn.refName
+	return cm
+}
+
+func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
+	cm := &SubChannelMetric{}
+	var sc *subChannel
+	var ok bool
+	c.mu.RLock()
+	if sc, ok = c.subChannels[id]; !ok {
+		// subchannel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.Sockets = copyMap(sc.sockets)
+	c.mu.RUnlock()
+	cm.ChannelData = sc.c.ChannelzMetric()
+	cm.ID = sc.id
+	cm.RefName = sc.refName
+	return cm
+}
+
+func (c *channelMap) GetSocket(id int64) *SocketMetric {
+	sm := &SocketMetric{}
+	c.mu.RLock()
+	if ls, ok := c.listenSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ls.s.ChannelzMetric()
+		sm.ID = ls.id
+		sm.RefName = ls.refName
+		return sm
+	}
+	if ns, ok := c.normalSockets[id]; ok {
+		c.mu.RUnlock()
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		return sm
+	}
+	c.mu.RUnlock()
+	return nil
+}
+
+type idGenerator struct {
+	id int64
+}
+
+func (i *idGenerator) reset() {
+	atomic.StoreInt64(&i.id, 0)
+}
+
+func (i *idGenerator) genID() int64 {
+	return atomic.AddInt64(&i.id, 1)
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 000000000..6fd6bb388
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,419 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if the child
+	// list is not empty, then deletion from the database is on hold until the last
+	// child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the
+	// child list is now empty. If both conditions are met, then it deletes self from the database.
+	deleteSelfIfReady()
+}
+
+// dummyEntry is a fake entry to handle the entry-not-found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context, causing
+	// http2Client to error out. The error info is then caught by the transport monitor
+	// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
+	// the addrConn will create a new transport. When registering the new transport in
+	// channelz, its parent addrConn could have already been torn down and deleted
+	// from channelz tracking, and thus the code reaches here.
+	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race condition.
+	// Refer to the example described in addChild().
+	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a channel to have sockets
+	// directly; therefore, this field is unused.
+	Sockets map[int64]string
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have nested
+	// channels as children; therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have subchannels
+	// as children; therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+}
+
+// ChannelInternalMetric defines the struct that the implementor of the Channel
+// interface should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to. May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+	//TODO: trace
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
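+//
+// For illustration only, a minimal implementation sketch with placeholder
+// metric values:
+//
+//	type fakeChannel struct{}
+//
+//	func (fakeChannel) ChannelzMetric() *ChannelInternalMetric {
+//		return &ChannelInternalMetric{
+//			State:  connectivity.Ready,
+//			Target: "example.com:443",
+//		}
+//	}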
+type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) deleteSelfIfReady() { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return + } + c.cm.deleteEntry(c.id) + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) deleteSelfIfReady() { + if !sc.closeCalled || len(sc.sockets) != 0 { + return + } + sc.cm.deleteEntry(sc.id) + sc.cm.findEntry(sc.pid).deleteChild(sc.id) +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. 
+	LastMessageReceivedTimestamp time.Time
+	// The amount of window granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency. This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency. This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+	// The locally bound address.
+	LocalAddr net.Addr
+	// The remote bound address. May be absent.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.
+	RemoteName    string
+	SocketOptions *SocketOptionData
+	Security      credentials.ChannelzSecurityValue
+}
+
+// Socket is the interface that should be satisfied in order to be tracked by
+// channelz as Socket.
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of the Server
+// interface should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+ LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 000000000..07215396d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine,go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 000000000..884910c4e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,38 @@ +// +build !linux appengine !go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "google.golang.org/grpc/grpclog" + +func init() { + grpclog.Infof("Channelz: socket options are not supported on non-linux OS and appengine.") +} + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Socket options are not supported on this platform (non-linux, appengine, or Go < 1.7). +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// It is a no-op on this platform (non-linux, appengine, or Go < 1.7). +func (s *SocketOptionData) Getsockopt(fd uintptr) {} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go new file mode 100644 index 000000000..e1e9e32d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go @@ -0,0 +1,39 @@ +// +build linux,go1.9,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go new file mode 100644 index 000000000..1d4da952d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go @@ -0,0 +1,26 @@ +// +build !linux !go1.9 appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn.
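+// On platforms without channelz socket-option support (non-linux, Go < 1.9, or appengine) it is a no-op that always returns nil.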
+func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..3ee8740f1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,35 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strings" +) + +const ( + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" +) + +var ( + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") +) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 000000000..200b115ca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + res := r.Int63n(n) + mu.Unlock() + return res +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + res := r.Intn(n) + mu.Unlock() + return res +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + res := r.Float64() + mu.Unlock() + return res +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..c35afb05e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,28 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code, to avoid polluting +// the godoc of the top-level grpc package. It must not import any grpc +// symbols to avoid circular dependencies. +package internal + +var ( + // WithContextDialer is exported by clientconn.go + WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption + // WithResolverBuilder is exported by clientconn.go + WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption +) diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 000000000..63cd2627c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // beta times our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bdp estimate by a factor of gamma. + beta = 0.66 + // To keep our bdp smaller than or equal to twice the real BDP, + // we should multiply our current sample by 4/3; however, to round + // things out, we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received.
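+// Pings whose payload does not match bdpPing.data are ignored.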
+// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 000000000..204ba1588 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,852 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) (bool, error) // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type incomingSettings struct { + ss []http2.Setting +} + +type outgoingSettings struct { + ss []http2.Setting +} + +type incomingGoAway struct { +} + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +type ping struct { + ack bool + data [8]byte +} + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. 
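+ // For example, deleteSelf() only rewires s.prev and s.next, so removal never walks the list.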
+ head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +func (c *controlBuffer) put(it interface{}) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue() + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. 
Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames, +// they are added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resembling round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is a map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally has a list of data items (and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, and/or +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of the run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there are no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor once, +// if the batch size is too low, to give stream goroutines a chance to fill it up. +func (l *loopyWriter) run() (err error) { + defer func() { + if err == ErrConnClosing { + // Don't log ErrConnClosing as an error since it happens + // 1. When the connection is closed by some other known issue. + // 2. User closed the connection. + // 3. A graceful close of connection. + infof("transport: loopyWriter.run returning. 
%v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + sendPing, err := hdr.initStream(str.id) + if err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+ return nil + } + if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + if sendPing { + return l.pingHandler(&ping{data: [8]byte{}}) + } + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization, thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side.
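+ // The side-specific handler, when installed, decides whether loopy should switch into draining mode.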
+ if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + return l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater, make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } + return nil +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and the stream has some stream-level flow control quota left. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h holds the grpc-message header and d holds the actual data. + // As an optimization to keep wire traffic low, data from d is copied into h to make the frame + // as big as the maximum possible HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
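+ // No stream-level quota left: park the stream until an incoming window update reactivates it.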
+ str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { // connection-level flow control. + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. + if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..9fa306b2e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. 
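+ // 64 * 1024 bytes = 64 KiB per stream.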
+ defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 000000000..5ea997a7e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,218 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. + done <-chan struct{} + // replenish is called by loopyWriter to give quota back to. + // It is implemented as a field so that it can be updated + // by tests. + replenish func(n int) +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + w := &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } + w.replenish = w.realReplenish + return w +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) realReplenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. 
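+ // A window update is sent once this reaches a quarter of the limit (see onRead).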
+ pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sender might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't exceed 2^31 - 1 as specified in the HTTP/2 spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; we will fall back on the current available window (at least a 1/4th of the limit). + f.delta = n + } + f.mu.Unlock() + return f.delta + } + f.mu.Unlock() + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/go16.go b/vendor/google.golang.org/grpc/internal/transport/go16.go new file mode 100644 index 000000000..e0d00115d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/go16.go @@ -0,0 +1,52 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "net" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err) +} + +// contextFromRequest returns a background context. +func contextFromRequest(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/go17.go b/vendor/google.golang.org/grpc/internal/transport/go17.go new file mode 100644 index 000000000..4d515b00d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/go17.go @@ -0,0 +1,53 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err) +} + +// contextFromRequest returns a context from the HTTP Request.
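+// It relies on http.Request.Context, which is available from Go 1.7 onwards (matching the build tag above).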
+func contextFromRequest(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 000000000..c6fb4b9c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,449 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. 
+ return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. + select { + case <-ht.closedCh: + return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + close(ht.writes) + } + ht.Close() + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := contextFromRequest(ht.req) + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-clientGone: + } + cancel() + ht.Close() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, it can only be:
+// * io.EOF
+// * io.ErrUnexpectedEOF
+// * of type transport.ConnectionError
+// * an error from the status package
+func mapRecvMsgError(err error) error {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ return err
+ }
+ if se, ok := err.(http2.StreamError); ok {
+ if code, ok := http2ErrConvTab[se.Code]; ok {
+ return status.Error(code, se.Error())
+ }
+ }
+ if strings.Contains(err.Error(), "body closed by handler") {
+ return status.Error(codes.Canceled, err.Error())
+ }
+ return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 000000000..904e790c4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1337 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+)
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ ctxDone <-chan struct{} // Cache the ctx.Done() chan.
+ userAgent string
+ md interface{}
+ conn net.Conn // underlying communication channel
+ loopy *loopyWriter
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+
+ readerDone chan struct{} // sync point to enable testing.
+ writerDone chan struct{} // sync point to enable testing.
+ // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+ // that the server sent GoAway on this transport.
+ goAway chan struct{}
+ // awakenKeepalive is used to wake up keepalive after it has gone dormant.
+ awakenKeepalive chan struct{}
+
+ framer *framer
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *controlBuffer
+ fc *trInFlow
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ isSecure bool
+
+ creds []credentials.PerRPCCredentials
+
+ // Boolean to keep track of reading activity on transport.
+ // 1 is true and 0 is false.
+ activity uint32 // Accessed atomically.
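+ // The activity flag is a small handshake between reader() and keepalive()
+ // further below; reduced to a sketch:
+ //
+ //	atomic.CompareAndSwapUint32(&t.activity, 0, 1) // reader: mark traffic seen
+ //
+ //	if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { // keepalive: any traffic?
+ //		timer.Reset(t.kp.Time) // yes; no ping needed this round
+ //	}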
+ kp keepalive.ClientParameters
+ keepaliveEnabled bool
+
+ statsHandler stats.Handler
+
+ initialWindowSize int32
+
+ // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+ maxSendHeaderListSize *uint32
+
+ bdpEst *bdpEstimator
+ // onSuccess is a callback that the client transport calls upon
+ // receiving the server preface to signal that a successful HTTP2
+ // connection was established.
+ onSuccess func()
+
+ maxConcurrentStreams uint32
+ streamQuota int64
+ streamsQuotaAvailable chan struct{}
+ waitingStreams uint32
+ nextID uint32
+
+ mu sync.Mutex // guard the following variables
+ state transportState
+ activeStreams map[uint32]*Stream
+ // prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
+ prevGoAwayID uint32
+ // goAwayReason records the http2.ErrCode and debug data received with the
+ // GoAway frame.
+ goAwayReason GoAwayReason
+
+ // Fields below are for channelz metric collection.
+ channelzID int64 // channelz unique identification number
+ czData *channelzData
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+ if fn != nil {
+ return fn(ctx, addr)
+ }
+ return dialContext(ctx, "tcp", addr)
+}
+
+func isTemporary(err error) bool {
+ switch err := err.(type) {
+ case interface {
+ Temporary() bool
+ }:
+ return err.Temporary()
+ case interface {
+ Timeout() bool
+ }:
+ // Timeouts may be resolved upon retry, and are thus treated as
+ // temporary.
+ return err.Timeout()
+ }
+ return true
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ *http2Client, err error) {
+ scheme := "http"
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+ if err != nil {
+ if opts.FailOnNonTempDialError {
+ return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
+ }
+ return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
+ }
+ // Any further errors will close the underlying connection.
+ defer func(conn net.Conn) {
+ if err != nil {
+ conn.Close()
+ }
+ }(conn)
+ var (
+ isSecure bool
+ authInfo credentials.AuthInfo
+ )
+ if creds := opts.TransportCredentials; creds != nil {
+ scheme = "https"
+ conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
+ if err != nil {
+ return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+ }
+ isSecure = true
+ }
+ kp := opts.KeepaliveParams
+ // Validate keepalive parameters.
+ if kp.Time == 0 {
+ kp.Time = defaultClientKeepaliveTime
+ }
+ if kp.Timeout == 0 {
+ kp.Timeout = defaultClientKeepaliveTimeout
+ }
+ dynamicWindow := true
+ icwz := int32(initialWindowSize)
+ if opts.InitialConnWindowSize >= defaultWindowSize {
+ icwz = opts.InitialConnWindowSize
+ dynamicWindow = false
+ }
+ writeBufSize := opts.WriteBufferSize
+ readBufSize := opts.ReadBufferSize
+ maxHeaderListSize := defaultClientMaxHeaderListSize
+ if opts.MaxHeaderListSize != nil {
+ maxHeaderListSize = *opts.MaxHeaderListSize
+ }
+ t := &http2Client{
+ ctx: ctx,
+ ctxDone: ctx.Done(), // Cache Done chan.
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onSuccess: onSuccess, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. + t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "") + } + if t.kp.Time != infinity { + t.keepaliveEnabled = true + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + t.framer.writer.Flush() + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. 
+ if _, ok := err.(net.Error); !ok {
+ t.conn.Close()
+ }
+ close(t.writerDone)
+ }()
+ return t, nil
+}
+
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+ // TODO(zhaoq): Handle uint32 overflow of Stream.id.
+ s := &Stream{
+ done: make(chan struct{}),
+ method: callHdr.Method,
+ sendCompress: callHdr.SendCompress,
+ buf: newRecvBuffer(),
+ headerChan: make(chan struct{}),
+ contentSubtype: callHdr.ContentSubtype,
+ }
+ s.wq = newWriteQuota(defaultWriteQuota, s.done)
+ s.requestRead = func(n int) {
+ t.adjustWindow(s, uint32(n))
+ }
+ // The client-side stream context should have exactly the same life cycle as the user-provided context.
+ // That means s.ctx should be read-only, and s.ctx is done iff ctx is done.
+ // So we use the original context here instead of creating a copy.
+ s.ctx = ctx
+ s.trReader = &transportReader{
+ reader: &recvBufferReader{
+ ctx: s.ctx,
+ ctxDone: s.ctx.Done(),
+ recv: s.buf,
+ },
+ windowHandler: func(n int) {
+ t.updateWindow(s, uint32(n))
+ },
+ }
+ return s
+}
+
+func (t *http2Client) getPeer() *peer.Peer {
+ pr := &peer.Peer{
+ Addr: t.remoteAddr,
+ }
+ // Attach Auth info if there is any.
+ if t.authInfo != nil {
+ pr.AuthInfo = t.authInfo
+ }
+ return pr
+}
+
+func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
+ aud := t.createAudience(callHdr)
+ authData, err := t.getTrAuthData(ctx, aud)
+ if err != nil {
+ return nil, err
+ }
+ callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
+ if err != nil {
+ return nil, err
+ }
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ // Make the slice of a predictable size to reduce allocations made by append.
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+ hfLen += len(authData) + len(callAuthData)
+ headerFields := make([]hpack.HeaderField, 0, hfLen)
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+ if callHdr.PreviousAttempts > 0 {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+ }
+
+ if callHdr.SendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ // Send out the timeout regardless of its value. The server can detect a timeout context by itself.
+ // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
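+ // A minimal sketch of rendering a deadline as a grpc-timeout header value
+ // (the vendored encodeTimeout lives in http_util.go; unit choice and
+ // truncating division here are simplifying assumptions):
+ //
+ //	func encodeTimeoutSketch(d time.Duration) string {
+ //		if d <= 0 {
+ //			return "0n"
+ //		}
+ //		units := []struct {
+ //			suffix string
+ //			unit   time.Duration
+ //		}{{"n", time.Nanosecond}, {"u", time.Microsecond}, {"m", time.Millisecond}, {"S", time.Second}, {"M", time.Minute}, {"H", time.Hour}}
+ //		for _, u := range units {
+ //			if v := int64(d / u.unit); v <= 99999999 { // at most 8 digits on the wire
+ //				return strconv.FormatInt(v, 10) + u.suffix
+ //			}
+ //		}
+ //		return "99999999H"
+ //	}
+ //
+ // e.g. a 250ms deadline would be rendered as "250000u" by this sketch; any
+ // spec-conformant unit/value pair is acceptable.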
+ timeout := dl.Sub(time.Now()) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.creds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + authData := map[string]string{} + for _, c := range t.creds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
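+ // For reference, a minimal PerRPCCredentials implementation looks roughly
+ // like this (illustrative only; tokenCreds is a made-up name):
+ //
+ //	type tokenCreds struct{ token string }
+ //
+ //	func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ //		return map[string]string{"authorization": "Bearer " + c.token}, nil
+ //	}
+ //
+ //	func (c tokenCreds) RequireTransportSecurity() bool { return true }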
+ if callCreds := callHdr.Creds; callCreds != nil {
+ if !t.isSecure && callCreds.RequireTransportSecurity() {
+ return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
+ data, err := callCreds.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "transport: %v", err)
+ }
+ for k, v := range data {
+ // Capital header names are illegal in HTTP/2
+ k = strings.ToLower(k)
+ callAuthData[k] = v
+ }
+ }
+ return callAuthData, nil
+}
+
+// NewStream creates a stream and registers it with the transport as an
+// "active" stream.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+ ctx = peer.NewContext(ctx, t.getPeer())
+ headerFields, err := t.createHeaderFields(ctx, callHdr)
+ if err != nil {
+ return nil, err
+ }
+ s := t.newStream(ctx, callHdr)
+ cleanup := func(err error) {
+ if s.swapState(streamDone) == streamDone {
+ // If it was already done, return.
+ return
+ }
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&s.unprocessed, 1)
+ s.write(recvMsg{err: err})
+ close(s.done)
+ // If headerChan isn't closed, then close it.
+ if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+ close(s.headerChan)
+ }
+
+ }
+ hdr := &headerFrame{
+ hf: headerFields,
+ endStream: false,
+ initStream: func(id uint32) (bool, error) {
+ t.mu.Lock()
+ if state := t.state; state != reachable {
+ t.mu.Unlock()
+ // Do a quick cleanup.
+ err := error(errStreamDrain)
+ if state == closing {
+ err = ErrConnClosing
+ }
+ cleanup(err)
+ return false, err
+ }
+ t.activeStreams[id] = s
+ if channelz.IsOn() {
+ atomic.AddInt64(&t.czData.streamsStarted, 1)
+ atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+ }
+ var sendPing bool
+ // If the number of active streams changes from 0 to 1, then check if keepalive
+ // has gone dormant. If so, wake it up.
+ if len(t.activeStreams) == 1 && t.keepaliveEnabled {
+ select {
+ case t.awakenKeepalive <- struct{}{}:
+ sendPing = true
+ // Fill the awakenKeepalive channel again as this channel must be
+ // kept non-writable except at the point that the keepalive()
+ // goroutine is waiting either to be awakened or shut down.
+ t.awakenKeepalive <- struct{}{}
+ default:
+ }
+ }
+ t.mu.Unlock()
+ return sendPing, nil
+ },
+ onOrphaned: cleanup,
+ wq: s.wq,
+ }
+ firstTry := true
+ var ch chan struct{}
+ checkForStreamQuota := func(it interface{}) bool {
+ if t.streamQuota <= 0 { // Can go negative if server decreases it.
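+ // Bookkeeping sketch (restating the surrounding code, not new behavior):
+ // streamQuota is a counter seeded from the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS, waitingStreams counts blocked callers,
+ // and streamsQuotaAvailable is a 1-buffered wake-up channel:
+ //
+ //	t.streamQuota--                       // taken when a stream starts
+ //	t.streamQuota++                       // returned in closeStream
+ //	t.streamsQuotaAvailable <- struct{}{} // non-blocking nudge to one waiter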
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // This will unblock write. + close(s.done) + // If headerChan isn't closed, then close it. 
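+ // The swap below is a close-once idiom: whichever goroutine flips the
+ // flag from 0 to 1 closes the channel, e.g.:
+ //
+ //	if atomic.SwapUint32(&done, 1) == 0 {
+ //		close(ch) // safe: exactly one caller observes the old value 0
+ //	}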
+ if atomic.SwapUint32(&s.headerDone, 1) == 0 { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() error { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + t.controlBuf.put(&incomingGoAway{}) + return nil +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. 
+ if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. 
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + // Our deadline was already exceeded, and that was likely the cause of + // this cancelation. Alter the status code accordingly. + if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. 
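+ // A concrete (illustrative) timeline: the server first sends
+ // GOAWAY(LastStreamID=MaxInt32), which only moves this transport to
+ // draining; after an RTT it sends GOAWAY(LastStreamID=7). At that point
+ // streams 9, 11, 13, ... are closed as unprocessed, while streams with
+ // IDs <= 7 are allowed to finish.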
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ t.state = draining
+ t.controlBuf.put(&incomingGoAway{})
+ }
+ // All streams with IDs greater than the GoAwayId
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ }
+ }
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ t.Close()
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+ t.goAwayReason = GoAwayNoReason
+ switch f.ErrCode {
+ case http2.ErrCodeEnhanceYourCalm:
+ if string(f.DebugData()) == "too_many_pings" {
+ t.goAwayReason = GoAwayTooManyPings
+ }
+ }
+}
+
+func (t *http2Client) GetGoAwayReason() GoAwayReason {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.goAwayReason
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+ s, ok := t.getStream(frame)
+ if !ok {
+ return
+ }
+ atomic.StoreUint32(&s.bytesReceived, 1)
+ var state decodeState
+ if err := state.decodeHeader(frame); err != nil {
+ t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
+ // Something went wrong; stop reading even if data remains.
+ return
+ }
+
+ endStream := frame.StreamEnded()
+ var isHeader bool
+ defer func() {
+ if t.statsHandler != nil {
+ if isHeader {
+ inHeader := &stats.InHeader{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ }
+ t.statsHandler.HandleRPC(s.ctx, inHeader)
+ } else {
+ inTrailer := &stats.InTrailer{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ }
+ t.statsHandler.HandleRPC(s.ctx, inTrailer)
+ }
+ }
+ }()
+ // If headers haven't been received yet.
+ if atomic.SwapUint32(&s.headerDone, 1) == 0 {
+ if !endStream {
+ // Headers frame is not actually a trailers-only frame.
+ isHeader = true
+ // These values can be set without any synchronization because
+ // the stream goroutine will read them only after seeing a closed
+ // headerChan, which we'll close after setting this.
+ s.recvCompress = state.encoding
+ if len(state.mdata) > 0 {
+ s.header = state.mdata
+ }
+ } else {
+ s.noHeaders = true
+ }
+ close(s.headerChan)
+ }
+ if !endStream {
+ return
+ }
+ t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true)
+}
+
+// reader runs as a separate goroutine in charge of reading data from the
+// network connection.
+//
+// TODO(zhaoq): currently one reader per transport. Investigate whether this is
+// optimal.
+// TODO(zhaoq): Check the validity of the incoming frame sequence.
+func (t *http2Client) reader() {
+ defer close(t.readerDone)
+ // Check the validity of the server preface.
+ frame, err := t.framer.fr.ReadFrame()
+ if err != nil {
+ t.Close()
+ return
+ }
+ if t.keepaliveEnabled {
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ t.Close()
+ return
+ }
+ t.onSuccess()
+ t.handleSettings(sf, true)
+
+ // loop to keep reading incoming messages on this transport.
+ for {
+ frame, err := t.framer.fr.ReadFrame()
+ if t.keepaliveEnabled {
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
+ }
+ if err != nil {
+ // Abort an active stream if the http2.Framer returns a
+ // http2.StreamError. This can happen only if the server's response
+ // is malformed http2.
+ if se, ok := err.(http2.StreamError); ok {
+ t.mu.Lock()
+ s := t.activeStreams[se.StreamID]
+ t.mu.Unlock()
+ if s != nil {
+ // use the error detail to provide a better error message
+ code := http2ErrConvTab[se.Code]
+ msg := t.framer.fr.ErrorDetail().Error()
+ t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+ }
+ continue
+ } else {
+ // Transport error.
+ t.Close()
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame, false)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+ p := &ping{data: [8]byte{}}
+ timer := time.NewTimer(t.kp.Time)
+ for {
+ select {
+ case <-timer.C:
+ if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+ timer.Reset(t.kp.Time)
+ continue
+ }
+ // Check if keepalive should go dormant.
+ t.mu.Lock()
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // Make awakenKeepalive writable.
+ <-t.awakenKeepalive
+ t.mu.Unlock()
+ select {
+ case <-t.awakenKeepalive:
+ // If control gets here, a ping has been sent; we
+ // need to reset the timer with keepalive.Timeout.
+ case <-t.ctx.Done():
+ return
+ }
+ } else {
+ t.mu.Unlock()
+ if channelz.IsOn() {
+ atomic.AddInt64(&t.czData.kpCount, 1)
+ }
+ // Send ping.
+ t.controlBuf.put(p)
+ }
+
+ // By the time control gets here a ping has been sent one way or the other.
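+ // The overall shape of this loop, reduced to a sketch (helper names are
+ // assumptions, not transport API):
+ //
+ //	for {
+ //		<-timer.C                // fires after kp.Time of silence
+ //		if sawRecentActivity() { // the activity-flag CAS above
+ //			continue
+ //		}
+ //		sendPing()
+ //		<-timer.C                // now armed with kp.Timeout
+ //		if !sawRecentActivity() {
+ //			t.Close() // peer is unresponsive
+ //		}
+ //	}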
+ timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..efb7f53ff --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1180 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + ctx context.Context + ctxDone <-chan struct{} // Cache the context.Done() chan + cancel context.CancelFunc + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. 
+ idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
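+ // getJitter is defined later in this file; it spreads connection ages so a
+ // fleet doesn't drop every connection at the same instant. A plausible
+ // sketch, assuming a +/-10% spread:
+ //
+ //	func getJitterSketch(v time.Duration) time.Duration {
+ //		r := int64(v / 10)            // 10% of the configured age
+ //		j := grpcrand.Int63n(2*r) - r // uniform in [-r, r)
+ //		return time.Duration(j)
+ //	}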
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + ctxDone: ctx.Done(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "") + } + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := decodeState{serverSide: true} + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + contentSubtype: state.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) + } + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. 
+ t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + frame, err := t.framer.fr.ReadFrame() + atomic.StoreUint32(&t.activity, 1) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, nil, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + t.closeStream(s, false, 0, nil, false) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. 
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+
+	now := time.Now()
+	defer func() {
+		t.lastPingAt = now
+	}()
+	// A reset of the ping strikes means that we don't need to check for
+	// a policy violation for this ping and the pingStrikes counter should
+	// be set to 0.
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active, thus this new ping should
+		// have come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		errorf("transport: Got too many pings from the client, closing the connection.")
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+	}
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			return false
+		}
+	}
+	return true
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	if s.updateHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		s.hdrMu.Unlock()
+		return err
+	}
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the
+	// metadata and other header fields first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
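+	// The frame queued below carries a header block like the following, as
+	// the client will see it (values illustrative):
+	//
+	//	:status: 200
+	//	content-type: application/grpc+proto
+	//	grpc-encoding: gzip   <- only when sendCompress is set
+	//	...user metadata from s.header, reserved names filtered out...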
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	if t.stats != nil {
+		// Note: WireLength is not set in outHeader.
+		// TODO(mmukhi): Revisit this later, if needed.
+		outHeader := &stats.OutHeader{}
+		t.stats.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream afterwards.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	if s.getState() == streamDone {
+		return nil
+	}
+	s.hdrMu.Lock()
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the
+	// metadata and other header fields first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				s.hdrMu.Unlock()
+				return err
+			}
+		} else { // Send a trailers-only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	}
+	s.hdrMu.Unlock()
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	t.closeStream(s, false, 0, trailingHeader, true)
+	if t.stats != nil {
+		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out.
+// A non-nil error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
+			return status.Errorf(codes.Internal, "transport: %v", err)
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
+			s.cancel()
+			select {
+			case <-t.ctx.Done():
+				return ErrConnClosing
+			default:
+			}
+			return ContextErr(s.ctx.Err())
+		}
+	}
+	// Add some data to the header frame so that we can equally distribute bytes across frames.
+	emptyLen := http2MaxFrameLen - len(hdr)
+	if emptyLen > len(data) {
+		emptyLen = len(data)
+	}
+	hdr = append(hdr, data[:emptyLen]...)
+	data = data[emptyLen:]
+	df := &dataFrame{
+		streamID: s.id,
+		h:        hdr,
+		d:        data,
+		onEachWrite: func() {
+			atomic.StoreUint32(&t.resetPingStrikes, 1)
+		},
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		select {
+		case <-t.ctx.Done():
+			return ErrConnClosing
+		default:
+		}
+		return ContextErr(s.ctx.Err())
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	var pingSent bool
+	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
+	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
+	keepalive := time.NewTimer(t.kp.Time)
+	// NOTE: All exit paths of this function should reset their
+	// respective timers. A failure to do so will cause the
+	// following clean-up to deadlock and eventually leak.
+	defer func() {
+		if !maxIdle.Stop() {
+			<-maxIdle.C
+		}
+		if !maxAge.Stop() {
+			<-maxAge.C
+		}
+		if !keepalive.Stop() {
+			<-keepalive.C
+		}
+	}()
+	for {
+		select {
+		case <-maxIdle.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				maxIdle.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
+				t.drain(http2.ErrCodeNo, []byte{})
+				// Resetting the timer so that the clean-up doesn't deadlock.
+				maxIdle.Reset(infinity)
+				return
+			}
+			maxIdle.Reset(val)
+		case <-maxAge.C:
+			t.drain(http2.ErrCodeNo, []byte{})
+			maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+			select {
+			case <-maxAge.C:
+				// Close the connection after grace period.
+				t.Close()
+				// Resetting the timer so that the clean-up doesn't deadlock.
+				maxAge.Reset(infinity)
+			case <-t.ctx.Done():
+			}
+			return
+		case <-keepalive.C:
+			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+				pingSent = false
+				keepalive.Reset(t.kp.Time)
+				continue
+			}
+			if pingSent {
+				t.Close()
+				// Resetting the timer so that the clean-up doesn't deadlock.
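+				// (time.Timer.Stop returns false once the timer has fired;
+				// if the fired value was already consumed by this select and
+				// the timer is never Reset, the drain in the deferred
+				// cleanup above would block forever, hence every exit path
+				// resets its timer first.)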
+ keepalive.Reset(infinity) + return + } + pingSent = true + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.ctx.Done(): + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + if s.swapState(streamDone) == streamDone { + // If the stream was already done, return. + return + } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + cleanup := &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + } + if hdr != nil { + hdr.cleanup = cleanup + t.controlBuf.put(hdr) + } else { + t.controlBuf.put(cleanup) + } +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. 
+			t.framer.writer.Flush()
+			return false, fmt.Errorf("transport: Connection closing")
+		}
+		return true, nil
+	}
+	t.mu.Unlock()
+	// For a graceful close, send out a GoAway with a stream ID of MaxUint32.
+	// Follow that with a ping and wait for the ack to come back, or for a
+	// timer to expire. During this time, accept new streams since they might
+	// have originated before the GoAway reaches the client.
+	// After getting the ack or timer expiration, send out another GoAway,
+	// this time with an ID of the maximum stream the server intends to process.
+	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+		return false, err
+	}
+	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+		return false, err
+	}
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		defer timer.Stop()
+		select {
+		case <-t.drainChan:
+		case <-timer.C:
+		case <-t.ctx.Done():
+			return
+		}
+		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
+	}()
+	return false, nil
+}
+
+func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
+	s := channelz.SocketInternalMetric{
+		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
+		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
+		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
+		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
+		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
+		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
+		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
+		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
+		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
+		LocalFlowControlWindow:           int64(t.fc.getSize()),
+		SocketOptions:                    channelz.GetSocketOption(t.conn),
+		LocalAddr:                        t.localAddr,
+		RemoteAddr:                       t.remoteAddr,
+		// RemoteName :
+	}
+	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
+		s.Security = au.GetSecurityValue()
+	}
+	s.RemoteFlowControlWindow = t.getOutFlowWindow()
+	return &s
+}
+
+func (t *http2Server) IncrMsgSent() {
+	atomic.AddInt64(&t.czData.msgSent, 1)
+	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+}
+
+func (t *http2Server) IncrMsgRecv() {
+	atomic.AddInt64(&t.czData.msgRecv, 1)
+	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+}
+
+func (t *http2Server) getOutFlowWindow() int64 {
+	resp := make(chan uint32)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.ctxDone:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+func getJitter(v time.Duration) time.Duration {
+	if v == infinity {
+		return 0
+	}
+	// Generate a jitter between +/- 10% of the value.
+	r := int64(v / 10)
+	j := grpcrand.Int63n(2*r) - r
+	return time.Duration(j)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
new file mode 100644
index 000000000..21da6e80b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -0,0 +1,613 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// http://http2.github.io/http2-spec/#SettingValues
+	http2InitHeaderTableSize = 4096
+	// baseContentType is the base content-type for gRPC. This is a valid
+	// content-type on its own, but can also include a content-subtype such as
+	// "proto" as a suffix after "+" or ";". See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	baseContentType = "application/grpc"
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
+	}
+	statusCodeConvTab = map[codes.Code]http2.ErrCode{
+		codes.Internal:          http2.ErrCodeInternal,
+		codes.Canceled:          http2.ErrCodeCancel,
+		codes.Unavailable:       http2.ErrCodeRefusedStream,
+		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
+	}
+	httpStatusConvTab = map[int]codes.Code{
+		// 400 Bad Request - INTERNAL.
+		http.StatusBadRequest: codes.Internal,
+		// 401 Unauthorized - UNAUTHENTICATED.
+		http.StatusUnauthorized: codes.Unauthenticated,
+		// 403 Forbidden - PERMISSION_DENIED.
+		http.StatusForbidden: codes.PermissionDenied,
+		// 404 Not Found - UNIMPLEMENTED.
+		http.StatusNotFound: codes.Unimplemented,
+		// 429 Too Many Requests - UNAVAILABLE.
+		http.StatusTooManyRequests: codes.Unavailable,
+		// 502 Bad Gateway - UNAVAILABLE.
+		http.StatusBadGateway: codes.Unavailable,
+		// 503 Service Unavailable - UNAVAILABLE.
+		http.StatusServiceUnavailable: codes.Unavailable,
+		// 504 Gateway timeout - UNAVAILABLE.
+		http.StatusGatewayTimeout: codes.Unavailable,
+	}
+)
+
+// decodeState records the state during HPACK decoding. It must be reset once
+// the decoding of the entire header block is finished.
+type decodeState struct {
+	encoding string
+	// statusGen caches the stream status received from the trailer the server
+	// sent. Client side only. Do not access directly. After all trailers are
+	// parsed, use the status method to retrieve the status.
+ statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string + // whether decoding on server side or not + serverSide bool +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + } + return d.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. 
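+		// e.g. "AAAA" (length 4, padded alphabet) decodes via StdEncoding,
+		// while "AAA" must use RawStdEncoding; len(v)%4 == 0 distinguishes
+		// the two because standard base64 always pads to a multiple of four.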
+ return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + if d.serverSide { + return nil + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. + if d.httpStatus == nil { + return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return status.Error(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) + } + d.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? 
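+		// e.g. a request sent with content-type "application/grpc+proto"
+		// sets d.contentSubtype to "proto", while the full value is still
+		// exposed through the metadata added below.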
+ d.addMetadata(f.Name, f.Value) + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + } + d.rawStatusCode = &code + case "grpc-message": + d.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + d.statusGen = status.FromProto(s) + case "grpc-timeout": + d.timeoutSet = true + var err error + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, v) + } + return nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. 
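+	// Worked examples: 99ms yields "99000000n" (99,000,000 fits in the
+	// 8-digit value), while 100ms overflows nanoseconds and yields
+	// "100000u"; only very large durations fall through to "S", "M" or "H".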
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + buf.WriteString(fmt.Sprintf("%%%02X", b)) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. 
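+		// (batchSize is 0 when the configured write buffer size is zero or
+		// negative, see newFramer below, so each Write bypasses the buffer
+		// and goes straight to the socket.)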
+ return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 000000000..879df80c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..fdf8ad684 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,708 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package transport defines and implements a message-oriented communication
+// channel to complete various transactions (e.g., an RPC). It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+	data []byte
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+// recvBuffer is an unbounded channel of recvMsg structs.
+// Note that recvBuffer differs from controlBuffer only in that recvBuffer
+// holds a channel of recvMsg structs instead of objects implementing the
+// "item" interface. recvBuffer is written to much more often than
+// controlBuffer, and using concrete recvMsg structs helps avoid allocation
+// in recvBuffer.put.
+type recvBuffer struct {
+	c       chan recvMsg
+	mu      sync.Mutex
+	backlog []recvMsg
+	err     error
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan recvMsg, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r recvMsg) {
+	b.mu.Lock()
+	if b.err != nil {
+		b.mu.Unlock()
+		// An error occurred earlier; don't accept more
+		// data or errors.
+		return
+	}
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	ctx     context.Context
+	ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+	recv    *recvBuffer
+	last    []byte // Stores the remaining data in the previous calls.
+	err     error
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	n, r.err = r.read(p)
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	if r.last != nil && len(r.last) > 0 {
+		// Read remaining data left in the last call.
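+		// (A short read leaves the unconsumed tail of the previous recvMsg
+		// in r.last; it has to be drained before the next message is pulled
+		// off the recv buffer.)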
+		copied := copy(p, r.last)
+		r.last = r.last[copied:]
+		return copied, nil
+	}
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		r.recv.load()
+		if m.err != nil {
+			return 0, m.err
+		}
+		copied := copy(p, m.data)
+		r.last = m.data[copied:]
+		return copied, nil
+	}
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     io.Reader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Callback to state application's intentions to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan chan struct{} // closed to indicate the end of header metadata.
+	headerDone uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu   sync.Mutex
+	header  metadata.MD // the received header metadata.
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() error {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return nil
+	}
+	select {
+	case <-s.ctx.Done():
+		return ContextErr(s.ctx.Err())
+	case <-s.headerChan:
+		return nil
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message.
It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+	if err := s.waitOnHeader(); err != nil {
+		return ""
+	}
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(str string) {
+	s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header acquires the key-value pairs of header metadata once they are
+// available. It blocks until i) the metadata is ready or ii) there is no
+// header metadata or iii) the stream is canceled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+	err := s.waitOnHeader()
+	// Even if the stream is closed, header is returned if available.
+	select {
+	case <-s.headerChan:
+		if s.header == nil {
+			return nil, nil
+		}
+		return s.header.Copy(), nil
+	default:
+	}
+	return nil, err
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, returns true, nil. If a context error happens
+// first, returns it as a status error. Client-side only.
+func (s *Stream) TrailersOnly() (bool, error) {
+	err := s.waitOnHeader()
+	if err != nil {
+		return false, err
+	}
+	// if !headerDone, some other connection error occurred.
+	return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after
+// either Read or Write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier.
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	AuthInfo              credentials.AuthInfo
+	InTapHandle           tap.ServerInHandle
+	StatsHandler          stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	ChannelzParentID      int64
+	MaxHeaderListSize     *uint32
+}
+
+// NewServerTransport creates a ServerTransport with conn, or a non-nil error
+// if it fails.
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+	return newHTTP2Server(conn, config)
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client connection.
+	TransportCredentials credentials.TransportCredentials
+	// KeepaliveParams stores the keepalive parameters.
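+	// (For illustration only, values are not library defaults: a dialer
+	// might set
+	//
+	//	KeepaliveParams: keepalive.ClientParameters{
+	//		Time:    30 * time.Second, // ping after 30s of inactivity
+	//		Timeout: 10 * time.Second, // close if the ping is unanswered
+	//	}
+	//
+	// subject to the server's keepalive.EnforcementPolicy.)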
+	KeepaliveParams keepalive.ClientParameters
+	// StatsHandler stores the handler for stats.
+	StatsHandler stats.Handler
+	// InitialWindowSize sets the initial window size for a stream.
+	InitialWindowSize int32
+	// InitialConnWindowSize sets the initial window size for a connection.
+	InitialConnWindowSize int32
+	// WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+	WriteBufferSize int
+	// ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
+	ReadBufferSize int
+	// ChannelzParentID sets the addrConn id which initiates the creation of this client transport.
+	ChannelzParentID int64
+	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
+	MaxHeaderListSize *uint32
+}
+
+// TargetInfo contains the information of the target such as network address and metadata.
+type TargetInfo struct {
+	Addr      string
+	Metadata  interface{}
+	Authority string
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this write is the last piece for
+	// this stream.
+	Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer's host.
+	Host string
+
+	// Method specifies the operation to perform.
+	Method string
+
+	// SendCompress specifies the compression algorithm applied on
+	// outbound message.
+	SendCompress string
+
+	// Creds specifies credentials.PerRPCCredentials for a call.
+	Creds credentials.PerRPCCredentials
+
+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close() error
+
+	// GracefulClose starts to tear down the transport. It stops accepting
+	// new RPCs and waits for the completion of the pending RPCs.
+	GracefulClose() error
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why GoAway frame was received.
+	GetGoAwayReason() GoAwayReason
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client. WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close() error
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// Drain notifies the client that this ServerTransport stops accepting new RPCs.
+	Drain()
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate
+	// an application-layer error.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since
+// performing an atomic operation on an int64 variable on a 32-bit machine requires
+// the user to enforce memory alignment.
+// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// a frame with the EoS bit set from the server.
+	// Server side: The number of streams that have ended successfully by sending
+	// a frame with the EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp at which the last stream was created.
+	// It is of int64 type instead of time.Time since it's more costly to atomically
+	// update a time.Time variable than an int64 variable. The same goes for
+	// lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 000000000..f8adc7e6d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point healthcheck.
+package keepalive
+
+import (
+	"time"
+)
+
+// ClientParameters is used to set keepalive parameters on the client-side.
+// These configure how the client will actively probe to notice when a connection is broken
+// and send pings so intermediaries will be aware of the liveness of the connection.
+// Make sure these parameters are set in coordination with the keepalive policy on the server,
+// as incompatible settings can result in the connection being closed.
type ClientParameters struct {
+	// After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive.
+	Time time.Duration // The current default value is infinity.
+	// After having pinged for a keepalive check, the client waits for a duration of Timeout, and if no activity is seen even after that,
+	// the connection is closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+	// If true, the client runs keepalive checks even with no active RPCs.
+	PermitWithoutStream bool // false by default.
+}
+
+// ServerParameters is used to set keepalive and max-age parameters on the server-side.
+type ServerParameters struct {
+	// MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
+	// The idleness duration is measured from the most recent time the number of outstanding RPCs became zero, or from connection establishment.
+	MaxConnectionIdle time.Duration // The current default value is infinity.
+	// MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
+	// A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
+	MaxConnectionAge time.Duration // The current default value is infinity.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
+	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+	// After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive.
+	Time time.Duration // The current default value is 2 hours.
+	// After having pinged for a keepalive check, the server waits for a duration of Timeout, and if no activity is seen even after that,
+	// the connection is closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set the keepalive enforcement policy on the server-side.
+// The server will close a connection with a client that violates this policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, the server expects keepalive pings even when there are no active streams (RPCs).
+	PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 000000000..bd2eaf408
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := MD{}
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := MD{}
+	var key string
+	for i, s := range kv {
+		if i%2 == 0 {
+			key = strings.ToLower(s)
+			continue
+		}
+		md[key] = append(md[key], s)
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	return Join(md)
+}
+
+// Get obtains the values for a given key.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at that key.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
+// Join joins any number of mds into a single MD.
+// The order of values for each key is determined by the order in which
+// the mds containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
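+			// (values from a later md are appended after values from an earlier
+			// md for the same key, which is what yields the ordering documented
+			// on Join above)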
+ } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 000000000..0f8a908ea --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultPort = "443"
+	defaultFreq = time.Minute * 30
+)
+
+var (
+	errMissingAddr  = errors.New("missing address")
+	errWatcherClose = errors.New("watcher has been closed")
+)
+
+// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
+// create watchers that poll the DNS server using the frequency set by freq.
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
+	return &dnsResolver{freq: freq}, nil
+}
+
+// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
+// watchers that poll the DNS server using the default frequency defined by defaultFreq.
+func NewDNSResolver() (Resolver, error) {
+	return NewDNSResolverWithFreq(defaultFreq)
+}
+
+// dnsResolver handles name resolution for names following the DNS scheme.
+type dnsResolver struct {
+	// frequency of polling the DNS server that the watchers created by this resolver will use.
+	freq time.Duration
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and the host-name is enclosed in square brackets, the
+// brackets are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
+	if target == "" {
+		return "", "", errMissingAddr
+	}
+
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6 (without brackets) address
+		return target, defaultPort, nil
+	}
+	if host, port, err := net.SplitHostPort(target); err == nil {
+		// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			host = "localhost"
+		}
+		if port == "" {
+			// If the port field is empty (target ends with a colon), e.g. "[::1]:", defaultPort is used.
+			port = defaultPort
+		}
+		return host, port, nil
+	}
+	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v", target)
+}
+
+// Resolve creates a watcher that watches the name resolution of the target.
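+// A hypothetical use of this (deprecated) API, sketched for illustration only;
+// the target below is made up:
+//
+//	r, _ := naming.NewDNSResolver()
+//	w, _ := r.Resolve("example.com:50051")
+//	updates, _ := w.Next() // the first call returns the full resolved set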
+func (r *dnsResolver) Resolve(target string) (Watcher, error) {
+	host, port, err := parseTarget(target)
+	if err != nil {
+		return nil, err
+	}
+
+	if net.ParseIP(host) != nil {
+		ipWatcher := &ipWatcher{
+			updateChan: make(chan *Update, 1),
+		}
+		host, _ = formatIP(host)
+		ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
+		return ipWatcher, nil
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	return &dnsWatcher{
+		r:      r,
+		host:   host,
+		port:   port,
+		ctx:    ctx,
+		cancel: cancel,
+		t:      time.NewTimer(0),
+	}, nil
+}
+
+// dnsWatcher watches for the name resolution update for a specific target.
+type dnsWatcher struct {
+	r    *dnsResolver
+	host string
+	port string
+	// The latest resolved address set.
+	curAddrs map[string]*Update
+	ctx      context.Context
+	cancel   context.CancelFunc
+	t        *time.Timer
+}
+
+// ipWatcher watches for the name resolution update for an IP address.
+type ipWatcher struct {
+	updateChan chan *Update
+}
+
+// Next returns the address resolution Update for the target. For an IP address,
+// the resolution result is the address itself, so polling the name server is
+// unnecessary. Next() returns an Update the first time it is called, and blocks
+// on all subsequent calls, as no new Update exists, until the watcher is closed.
+func (i *ipWatcher) Next() ([]*Update, error) {
+	u, ok := <-i.updateChan
+	if !ok {
+		return nil, errWatcherClose
+	}
+	return []*Update{u}, nil
+}
+
+// Close closes the ipWatcher.
+func (i *ipWatcher) Close() {
+	close(i.updateChan)
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+	// Backend indicates the server is a backend server.
+	Backend AddressType = iota
+	// GRPCLB indicates the server is a grpclb load balancer.
+	GRPCLB
+)
+
+// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
+// name resolver used by the grpclb balancer is required to provide this type of metadata in
+// its address updates.
+type AddrMetadataGRPCLB struct {
+	// AddrType is the type of server (grpc load balancer or backend).
+	AddrType AddressType
+	// ServerName is the name of the grpc load balancer. Used for authentication.
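+	// For SRV-discovered balancers this is the SRV target host; see
+	// lookupSRV below, which stores s.Target here.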
+	ServerName string
+}
+
+// compileUpdate compares the old resolved addresses and newly resolved addresses,
+// and generates an update list.
+func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
+	var res []*Update
+	for a, u := range w.curAddrs {
+		if _, ok := newAddrs[a]; !ok {
+			u.Op = Delete
+			res = append(res, u)
+		}
+	}
+	for a, u := range newAddrs {
+		if _, ok := w.curAddrs[a]; !ok {
+			res = append(res, u)
+		}
+	}
+	return res
+}
+
+func (w *dnsWatcher) lookupSRV() map[string]*Update {
+	newAddrs := make(map[string]*Update)
+	_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
+	if err != nil {
+		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, s := range srvs {
+		lbAddrs, err := lookupHost(w.ctx, s.Target)
+		if err != nil {
+			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+			continue
+		}
+		for _, a := range lbAddrs {
+			a, ok := formatIP(a)
+			if !ok {
+				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+				continue
+			}
+			addr := a + ":" + strconv.Itoa(int(s.Port))
+			newAddrs[addr] = &Update{Addr: addr,
+				Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
+		}
+	}
+	return newAddrs
+}
+
+func (w *dnsWatcher) lookupHost() map[string]*Update {
+	newAddrs := make(map[string]*Update)
+	addrs, err := lookupHost(w.ctx, w.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, a := range addrs {
+		a, ok := formatIP(a)
+		if !ok {
+			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			continue
+		}
+		addr := a + ":" + w.port
+		newAddrs[addr] = &Update{Addr: addr}
+	}
+	return newAddrs
+}
+
+func (w *dnsWatcher) lookup() []*Update {
+	newAddrs := w.lookupSRV()
+	if newAddrs == nil {
+		// If failed to get any balancer address (either no corresponding SRV for the
+		// target, or caused by failure during resolution/parsing of the balancer target),
+		// return any A record info available.
+		newAddrs = w.lookupHost()
+	}
+	result := w.compileUpdate(newAddrs)
+	w.curAddrs = newAddrs
+	return result
+}
+
+// Next returns the resolved address update (delta) for the target. If there's no
+// change, it will wait for the polling interval (w.r.freq, 30 minutes by default)
+// and try to resolve again after that.
+func (w *dnsWatcher) Next() ([]*Update, error) {
+	for {
+		select {
+		case <-w.ctx.Done():
+			return nil, errWatcherClose
+		case <-w.t.C:
+		}
+		result := w.lookup()
+		// Next lookup should happen after an interval defined by w.r.freq.
+		w.t.Reset(w.r.freq)
+		if len(result) > 0 {
+			return result, nil
+		}
+	}
+}
+
+func (w *dnsWatcher) Close() {
+	w.cancel()
+}
diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go
new file mode 100644
index 000000000..57b65d7b8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/naming/go17.go
@@ -0,0 +1,34 @@
+// +build go1.6,!go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+		return net.LookupSRV(service, proto, name)
+	}
+)
diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go
new file mode 100644
index 000000000..b5a0f8427
--- /dev/null
+++ b/vendor/google.golang.org/grpc/naming/go18.go
@@ -0,0 +1,28 @@
+// +build go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import "net"
+
+var (
+	lookupHost = net.DefaultResolver.LookupHost
+	lookupSRV  = net.DefaultResolver.LookupSRV
+)
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
new file mode 100644
index 000000000..8cc39e937
--- /dev/null
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -0,0 +1,69 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package naming defines the naming API and related data structures for gRPC.
+// The interface is EXPERIMENTAL and may be subject to change.
+//
+// Deprecated: please use package resolver.
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+//
+// Deprecated: please use package resolver.
+type Operation uint8
+
+const (
+	// Add indicates a new address is added.
+	Add Operation = iota
+	// Delete indicates an existing address is deleted.
+	Delete
+)
+
+// Update defines a name resolution update. Note that it is not valid to have
+// both an empty-string Addr and nil Metadata in an Update.
+//
+// Deprecated: please use package resolver.
+type Update struct {
+	// Op indicates the operation of the update.
+	Op Operation
+	// Addr is the updated address. It is an empty string if there is no address update.
+	Addr string
+	// Metadata is the updated metadata. It is nil if there is no metadata update.
+	// Metadata is not required for a custom naming implementation.
+	Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+//
+// Deprecated: please use package resolver.
+type Resolver interface {
+	// Resolve creates a Watcher for target.
+ Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 000000000..317b8b9d0 --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 000000000..76cc456aa --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "io" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" +) + +// pickerWrapper is a wrapper of balancer.Picker. 
It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	mu         sync.Mutex
+	done       bool
+	blockingCh chan struct{}
+	picker     balancer.Picker
+
+	// The latest connection error that occurred.
+	connErrMu sync.Mutex
+	connErr   error
+}
+
+func newPickerWrapper() *pickerWrapper {
+	bp := &pickerWrapper{blockingCh: make(chan struct{})}
+	return bp
+}
+
+func (bp *pickerWrapper) updateConnectionError(err error) {
+	bp.connErrMu.Lock()
+	bp.connErr = err
+	bp.connErrMu.Unlock()
+}
+
+func (bp *pickerWrapper) connectionError() error {
+	bp.connErrMu.Lock()
+	err := bp.connErr
+	bp.connErrMu.Unlock()
+	return err
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
+	bp.mu.Lock()
+	if bp.done {
+		bp.mu.Unlock()
+		return
+	}
+	bp.picker = p
+	// bp.blockingCh should never be nil.
+	close(bp.blockingCh)
+	bp.blockingCh = make(chan struct{})
+	bp.mu.Unlock()
+}
+
+func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
+	acw.mu.Lock()
+	ac := acw.ac
+	acw.mu.Unlock()
+	ac.incrCallsStarted()
+	return func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+	var (
+		p  balancer.Picker
+		ch chan struct{}
+	)
+
+	for {
+		bp.mu.Lock()
+		if bp.done {
+			bp.mu.Unlock()
+			return nil, nil, ErrClientConnClosing
+		}
+
+		if bp.picker == nil {
+			ch = bp.blockingCh
+		}
+		if ch == bp.blockingCh {
+			// This could happen when either:
+			// - bp.picker is nil (the previous if condition), or
+			// - we have already called pick on the current picker.
+			bp.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				return nil, nil, ctx.Err()
+			case <-ch:
+			}
+			continue
+		}
+
+		ch = bp.blockingCh
+		p = bp.picker
+		bp.mu.Unlock()
+
+		subConn, done, err := p.Pick(ctx, opts)
+
+		if err != nil {
+			switch err {
+			case balancer.ErrNoSubConnAvailable:
+				continue
+			case balancer.ErrTransientFailure:
+				if !failfast {
+					continue
+				}
+				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
+			default:
+				// err is some other error.
+				return nil, nil, toRPCErr(err)
+			}
+		}
+
+		acw, ok := subConn.(*acBalancerWrapper)
+		if !ok {
+			grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
+			continue
+		}
+		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+			if channelz.IsOn() {
+				return t, doneChannelzWrapper(acw, done), nil
+			}
+			return t, done, nil
+		}
+		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		// If ok == false, ac.state is not READY.
+		// A valid picker always returns a READY subConn. This means the state of ac
+		// just changed, and picker will be updated shortly.
+		// continue back to the beginning of the for loop to repick.
+ } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 000000000..bf659d49d --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 000000000..2d40236e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 
+1,130 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	// errDisabled indicates that the proxy is disabled for the address.
+	errDisabled = errors.New("proxy is disabled for the address")
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(ctx context.Context, address string) (string, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return "", err
+	}
+	if url == nil {
+		return "", errDisabled
+	}
+	return url.Host, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := (&http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: addr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	})
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+
+	return &bufConn{Conn: conn, r: r}, nil
+}
+
+// newProxyDialer returns a dialer that connects to the proxy first if necessary.
+// The returned dialer checks if a proxy is necessary, dials the proxy with the
+// provided dialer, performs the HTTP CONNECT handshake, and returns the connection.
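+// A sketch of how the wrapping behaves (newProxyDialer is unexported; the base
+// dialer and the address below are illustrative):
+//
+//	dial := newProxyDialer(func(ctx context.Context, addr string) (net.Conn, error) {
+//		return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+//	})
+//	// conn is either a direct connection, or a connection to the proxy taken
+//	// from the environment on which the HTTP CONNECT handshake has completed.
+//	conn, err := dial(ctx, "backend.example.com:443")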
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
+	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
+		var skipHandshake bool
+		newAddr, err := mapAddress(ctx, addr)
+		if err != nil {
+			if err != errDisabled {
+				return nil, err
+			}
+			skipHandshake = true
+			newAddr = addr
+		}
+
+		conn, err = dialer(ctx, newAddr)
+		if err != nil {
+			return
+		}
+		if !skipHandshake {
+			conn, err = doHTTPConnectHandshake(ctx, conn, addr)
+		}
+		return
+	}
+}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..4ce81671d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,397 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/resolver"
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+}
+
+const (
+	defaultPort = "443"
+	defaultFreq = time.Minute * 30
+	golang      = "GO"
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var (
+	errMissingAddr = errors.New("dns resolver: missing address")
+
+	// Addresses ending with a colon that is supposed to be the separator
+	// between host and port are not allowed. E.g. "::" is a valid address as
+	// it is an IPv6 address (host only), while "[::]:" is invalid as it ends
+	// with a colon that acts as the host and port separator.
+	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{minFreq: defaultFreq}
+}
+
+type dnsBuilder struct {
+	// minimum frequency of polling the DNS server.
+	minFreq time.Duration
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	if target.Authority != "" {
+		return nil, fmt.Errorf("default DNS resolver does not support custom DNS server")
+	}
+	host, port, err := parseTarget(target.Endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
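+	// An IP literal needs no DNS polling: emit the address once below and
+	// re-send it only on ResolveNow (see ipResolver.watcher).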
+	if net.ParseIP(host) != nil {
+		host, _ = formatIP(host)
+		addr := []resolver.Address{{Addr: host + ":" + port}}
+		i := &ipResolver{
+			cc: cc,
+			ip: addr,
+			rn: make(chan struct{}, 1),
+			q:  make(chan struct{}),
+		}
+		cc.NewAddress(addr)
+		go i.watcher()
+		return i, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		freq:                 b.minFreq,
+		backoff:              backoff.Exponential{MaxDelay: b.minFreq},
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		t:                    time.NewTimer(0),
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+// ipResolver watches for the name resolution update for an IP address.
+type ipResolver struct {
+	cc resolver.ClientConn
+	ip []resolver.Address
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	q  chan struct{}
+}
+
+// ResolveNow resends the address it stores; no resolution is needed.
+func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case i.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the ipResolver.
+func (i *ipResolver) Close() {
+	close(i.q)
+}
+
+func (i *ipResolver) watcher() {
+	for {
+		select {
+		case <-i.rn:
+			i.cc.NewAddress(i.ip)
+		case <-i.q:
+			return
+		}
+	}
+}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	freq       time.Duration
+	backoff    backoff.Exponential
+	retryCount int
+	host       string
+	port       string
+	ctx        context.Context
+	cancel     context.CancelFunc
+	cc         resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	t  *time.Timer
+	// wg is used to force Close() to return only after the watcher() goroutine has
+	// finished. Otherwise, a data race is possible. [Race Example] In dns_resolver_test
+	// we replace the real lookup functions with mocked ones to facilitate testing.
+	// If Close() doesn't wait for the watcher() goroutine to finish, the race detector
+	// sometimes warns that lookup (READ of the lookup function pointers) inside the
+	// watcher() goroutine races with replaceNetFunc (WRITE of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+	d.t.Stop()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	for {
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-d.t.C:
+		case <-d.rn:
+		}
+		result, sc := d.lookup()
+		// Next lookup should happen within an interval defined by d.freq. It may be
+		// more often due to exponential retry on an empty address list.
+		if len(result) == 0 {
+			d.retryCount++
+			d.t.Reset(d.backoff.Backoff(d.retryCount))
+		} else {
+			d.retryCount = 0
+			d.t.Reset(d.freq)
+		}
+		d.cc.NewServiceConfig(sc)
+		d.cc.NewAddress(result)
+	}
+}
+
+func (d *dnsResolver) lookupSRV() []resolver.Address {
+	var newAddrs []resolver.Address
+	_, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, s := range srvs {
+		lbAddrs, err := lookupHost(d.ctx, s.Target)
+		if err != nil {
+			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
+			continue
+		}
+		for _, a := range lbAddrs {
+			a, ok := formatIP(a)
+			if !ok {
+				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+				continue
+			}
+			addr := a + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+		}
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookupTXT() string {
+	ss, err := lookupTXT(d.ctx, d.host)
+	if err != nil {
+		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
+		return ""
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
+		return ""
+	}
+	return strings.TrimPrefix(res, txtAttribute)
+}
+
+func (d *dnsResolver) lookupHost() []resolver.Address {
+	var newAddrs []resolver.Address
+	addrs, err := lookupHost(d.ctx, d.host)
+	if err != nil {
+		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+		return nil
+	}
+	for _, a := range addrs {
+		a, ok := formatIP(a)
+		if !ok {
+			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			continue
+		}
+		addr := a + ":" + d.port
+		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+	}
+	return newAddrs
+}
+
+func (d *dnsResolver) lookup() ([]resolver.Address, string) {
+	newAddrs := d.lookupSRV()
+	// Support fallback to non-balancer address.
+	newAddrs = append(newAddrs, d.lookupHost()...)
+	if d.disableServiceConfig {
+		return newAddrs, ""
+	}
+	sc := d.lookupTXT()
+	return newAddrs, canaryingSC(sc)
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and the host-name is enclosed in square brackets, the
+// brackets are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+func parseTarget(target string) (host, port string, err error) {
+	if target == "" {
+		return "", "", errMissingAddr
+	}
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6 (without brackets) address
+		return target, defaultPort, nil
+	}
+	if host, port, err = net.SplitHostPort(target); err == nil {
+		if port == "" {
+			// If the port field is empty (target ends with a colon), e.g. "[::1]:", this is an error.
+			return "", "", errEndsWithColon
+		}
+		// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			host = "localhost"
+		}
+		return host, port, nil
+	}
+	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
+}
+
+type rawChoice struct {
+	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
+	Percentage     *int             `json:"percentage,omitempty"`
+	ClientHostName *[]string        `json:"clientHostName,omitempty"`
+	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
+}
+
+func containsString(a *[]string, b string) bool {
+	if a == nil {
+		return true
+	}
+	for _, c := range *a {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+func chosenByPercentage(a *int) bool {
+	if a == nil {
+		return true
+	}
+	return grpcrand.Intn(100)+1 <= *a
+}
+
+func canaryingSC(js string) string {
+	if js == "" {
+		return ""
+	}
+	var rcs []rawChoice
+	err := json.Unmarshal([]byte(js), &rcs)
+	if err != nil {
+		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+		return ""
+	}
+	cliHostname, err := os.Hostname()
+	if err != nil {
+		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+		return ""
+	}
+	var sc string
+	for _, c := range rcs {
+		if !containsString(c.ClientLanguage, golang) ||
+			!chosenByPercentage(c.Percentage) ||
+			!containsString(c.ClientHostName, cliHostname) ||
+			c.ServiceConfig == nil {
+			continue
+		}
+		sc = string(*c.ServiceConfig)
+		break
+	}
+	return sc
+}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go
new file mode 100644
index 000000000..b466bc8f6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go
@@ -0,0 +1,35 @@
+// +build go1.6,!go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import (
+	"net"
+
+	"golang.org/x/net/context"
+)
+
+var (
+	lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+	lookupSRV  = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+		return net.LookupSRV(service, proto, name)
+	}
+	lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
+)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go
new file mode 100644
index 000000000..fa34f14ca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go
@@ -0,0 +1,29 @@
+// +build go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import "net"
+
+var (
+	lookupHost = net.DefaultResolver.LookupHost
+	lookupSRV  = net.DefaultResolver.LookupSRV
+	lookupTXT  = net.DefaultResolver.LookupTXT
+)
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
new file mode 100644
index 000000000..b76010d74
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package passthrough implements a pass-through resolver. It sends the target
+// name without the scheme back to gRPC as the resolved address.
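+//
+// For example (illustrative): a target such as "passthrough:///localhost:50051",
+// or a bare "localhost:50051" when passthrough is the default scheme, reaches
+// the ClientConn unmodified as the address to dial.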
+package passthrough
+
+import "google.golang.org/grpc/resolver"
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	r := &passthroughResolver{
+		target: target,
+		cc:     cc,
+	}
+	r.start()
+	return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+	return scheme
+}
+
+type passthroughResolver struct {
+	target resolver.Target
+	cc     resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+	r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+	resolver.Register(&passthroughBuilder{})
+}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 000000000..145cf477e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,158 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default is
+// "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // + // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decisions. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { + // DisableServiceConfig indicates whether the resolver should fetch service config data. + DisableServiceConfig bool +} + +// ClientConn contains the callbacks for the resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewAddress is called by the resolver to notify the ClientConn of a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by the resolver to notify the ClientConn of a new + // service config. The service config should be provided as a JSON string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint; the resolver can ignore it if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 000000000..494d6931e --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements the resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper parses cc.target for its scheme, gets the resolver +// builder for that scheme, and builds the resolver. The monitoring goroutine +// for it is not started yet and can be started by calling start(). +// +// If the withResolverBuilder dial option is set, the specified resolver will be +// used instead. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + rb := cc.dopts.resolverBuilder + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + done: make(chan struct{}), + } + + var err error + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) start() { + go ccr.watcher() +} + +// watcher processes address updates and service config updates sequentially. +// Otherwise, we would need to resolve possible races between address and service +// config updates (e.g. when they specify different balancer types).
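+//
+// Editor's worked example (not upstream documentation): with parseTarget as
+// defined above, "dns://8.8.8.8/example.com:443" parses to
+// {Scheme: "dns", Authority: "8.8.8.8", Endpoint: "example.com:443"}, while a
+// bare "example.com:443" contains no "://" and falls back to
+// {Endpoint: "example.com:443"}, to be handled by the default scheme.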
+func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + ccr.cc.handleResolvedAddrs(addrs, nil) + case sc := <-ccr.scCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + ccr.cc.handleServiceConfig(sc) + case <-ccr.done: + return + } + } +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { + ccr.resolver.ResolveNow(o) +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + close(ccr.done) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + select { + case <-ccr.addrCh: + default: + } + ccr.addrCh <- addrs +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..fa0568302 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,778 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip.
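+//
+// A minimal client-side usage sketch (editor's illustration; addr and the
+// dial site are hypothetical, not part of the upstream source):
+//
+//	cp, err := grpc.NewGZIPCompressorWithLevel(gzip.BestSpeed)
+//	if err != nil {
+//		// handle the invalid-level error
+//	}
+//	conn, err := grpc.Dial(addr, grpc.WithCompressor(cp))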
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompresses it. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream *clientStream + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOption that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API.
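+//
+// For example (editor's sketch; client, SomeMethod, and req are hypothetical):
+//
+//	var hdr metadata.MD
+//	resp, err := client.SomeMethod(ctx, req, grpc.Header(&hdr))
+//	// On success, hdr holds the header metadata the server sent.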
+type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOption that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failFast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs are "Fail Fast". +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API.
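+//
+// Per-call options such as these compose at the call site, e.g. (editor's
+// illustration; the client, method, and limits are hypothetical):
+//
+//	resp, err := client.SomeMethod(ctx, req,
+//		grpc.MaxCallRecvMsgSize(16*1024*1024),
+//		grpc.FailFast(false))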
+type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If CallCustomCodec is not also used, the content-subtype will be used to +// look up the Codec to use in the registry controlled by RegisterCodec. See +// the documentation on RegisterCodec for details on registration. The lookup +// of content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If CallCustomCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// CallCustomCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. 
+// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. 
If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, _ := compressor.Compress(cbuf) + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? 
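+//
+// Editor's note (illustrative, derived from msgHeader and recvMsg above): a
+// 10-byte uncompressed payload goes on the wire as
+//
+//	0x00                   // payloadFormat byte: compressionNone
+//	0x00 0x00 0x00 0x0a    // big-endian uint32 length = 10
+//	<10 payload bytes>
+//
+// i.e. exactly the 5-byte header that parser.recvMsg reads back before the
+// message body.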
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return err + } + if inPayload != nil { + inPayload.WireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return st.Err() + } + + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + d, err = ioutil.ReadAll(dcReader) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(d) > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if inPayload != nil { + inPayload.RecvTime = time.Now() + inPayload.Payload = m + // TODO truncate large payload. + inPayload.Data = d + inPayload.Length = len(d) + } + return nil +} + +type rpcInfo struct { + failfast bool +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.FromError and Code method instead. +func Code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.FromError and Message method instead. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. 
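+ // (Editor's note: for example, a call issued with no CallContentSubtype
+ // option reaches this branch and uses the registered proto codec, while a
+ // CallContentSubtype("json") call takes the lookup below and fails with
+ // codes.Internal unless a codec was registered under "json"; the "json"
+ // subtype is illustrative.)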
+ c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to the dialer. +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since doing atomic +// operations on an int64 variable on a 32-bit machine requires the user to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp at which the last call started. It is of int64 type instead of + // time.Time since it's more costly to atomically update a time.Time variable than an int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..5c7d5b635 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1447 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type options struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 +} + +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption func(*options) + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. 
+// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on the underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of the read buffer; this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable the read buffer for a connection so the data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets the window size for a stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets the window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + return func(o *options) { + o.keepaliveParams = kp + } +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return func(o *options) { + o.keepalivePolicy = kep + } +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB.
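+//
+// Server options compose at construction time, e.g. (editor's sketch with
+// arbitrary example values, not upstream documentation):
+//
+//	s := grpc.NewServer(
+//		grpc.MaxRecvMsgSize(8*1024*1024),
+//		grpc.WriteBufferSize(64*1024),
+//		grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionIdle: 5 * time.Minute}),
+//	)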
+func MaxRecvMsgSize(m int) ServerOption { + return func(o *options) { + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + } +} + +// InTapHandle returns a ServerOption that sets the tap handle for all server +// transports to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return func(o *options) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + } +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return func(o *options) { + o.statsHandler = h + } +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL.
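+//
+// Since only one unary interceptor can be installed (see UnaryInterceptor
+// above), chaining is left to the caller; a minimal sketch (editor's
+// illustration, not an upstream helper):
+//
+//	func chain(a, b grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
+//		return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (interface{}, error) {
+//			return a(ctx, req, info, func(ctx context.Context, req interface{}) (interface{}, error) {
+//				return b(ctx, req, info, h)
+//			})
+//		}
+//	}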
+func ConnectionTimeout(d time.Duration) ServerOption { + return func(o *options) { + o.connectionTimeout = d + } +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return func(o *options) { + o.maxHeaderListSize = &s + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), + czData: new(channelzData), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. +func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. 
+// Service names include the package names, in the form of <package>.<service>. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + select { + // Stop or GracefulStop called; block until done and return nil. + case <-s.quit: + <-s.done + default: + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "") + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit: + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + select { + case <-s.quit: + return nil + default: + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine.
+ // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. + if err != credentials.ErrConnDispatched { + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. 
+// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + c.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + c.(transport.ServerTransport).Drain() + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + p := &parser{r: stream} + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return st.Err() + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + if dc != nil { + req, err = dc.Do(bytes.NewReader(req)) + if err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + } else { + tmp, _ := decomp.Decompress(bytes.NewReader(req)) + req, err = ioutil.ReadAll(tmp) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? 
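+ // On success, the final status frame delivered to the client is
+ // codes.OK with an empty message.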
+ return t.WriteStatus(stream, status.New(codes.OK, "")) +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, status.New(codes.OK, "")) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? + if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + errDesc := fmt.Sprintf("unknown method %v", method) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. 
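+//
+// As an illustration only (hypothetical names, not part of this API), a test
+// could stub the interface and attach it to a context:
+//
+//  type stubStream struct{}
+//
+//  func (stubStream) Method() string                  { return "/pkg.Service/Method" }
+//  func (stubStream) SetHeader(md metadata.MD) error  { return nil }
+//  func (stubStream) SendHeader(md metadata.MD) error { return nil }
+//  func (stubStream) SetTrailer(md metadata.MD) error { return nil }
+//
+//  ctx := grpc.NewContextWithServerTransportStream(context.Background(), stubStream{})
+//  _ = grpc.SetHeader(ctx, metadata.Pairs("k", "v")) // now routed to the stub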
+type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.serveWG.Wait() + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. 
+// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..e0d735265 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,358 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. 
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ retryPolicy *retryPolicy
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+ // LB is the load balancer the service provider recommends. The balancer specified
+ // via grpc.WithBalancer will override this.
+ LB *string
+
+ // Methods contains a map for the methods in this service. If there is an
+ // exact match for a method (i.e. /service/method) in the map, use the
+ // corresponding MethodConfig. If there's no exact match, look for the
+ // default config for the service (/service/) and use the corresponding
+ // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+ // use.
+ Methods map[string]MethodConfig
+
+ // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+ // retry attempts and hedged RPCs when the client’s ratio of failures to
+ // successes exceeds a threshold.
+ //
+ // For each server name, the gRPC client will maintain a token_count which is
+ // initially set to maxTokens, and can take values between 0 and maxTokens.
+ //
+ // Every outgoing RPC (regardless of service or method invoked) will change
+ // token_count as follows:
+ //
+ // - Every failed RPC will decrement the token_count by 1.
+ // - Every successful RPC will increment the token_count by tokenRatio.
+ //
+ // If token_count is less than or equal to maxTokens / 2, then RPCs will not
+ // be retried and hedged RPCs will not be sent.
+ retryThrottling *retryThrottlingPolicy
+}
+
+// retryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ maxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoffMS). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
+ //
+ // These fields are required and must be greater than zero.
+ initialBackoff time.Duration
+ maxBackoff time.Duration
+ backoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
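+//
+// For illustration (hypothetical values), the JSON decoded into these structs
+// is roughly of this shape; encoding/json matches the field names
+// case-insensitively, and timeouts use the "s"-suffixed form understood by
+// parseDuration above:
+//
+//  {
+//    "loadBalancingPolicy": "round_robin",
+//    "methodConfig": [{
+//      "name": [{ "service": "foo.Bar", "method": "Baz" }],
+//      "waitForReady": true,
+//      "timeout": "1.5s",
+//      "maxRequestMessageBytes": 1024
+//    }]
+//  }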
+type jsonSC struct { + LoadBalancingPolicy *string + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy +} + +func parseServiceConfig(js string) (ServiceConfig, error) { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + } + if rsc.MethodConfig == nil { + return sc, nil + } + + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + if sc.retryThrottling != nil { + if sc.retryThrottling.MaxTokens <= 0 || + sc.retryThrottling.MaxTokens >= 1000 || + sc.retryThrottling.TokenRatio <= 0 { + // Illegal throttling config; disable throttling. + sc.retryThrottling = nil + } + } + return sc, nil +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..05b384c69 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "net"
+
+ "golang.org/x/net/context"
+)
+
+// ConnTagInfo defines the relevant information needed by the connection context tagger.
+type ConnTagInfo struct {
+ // RemoteAddr is the remote address of the corresponding connection.
+ RemoteAddr net.Addr
+ // LocalAddr is the local address of the corresponding connection.
+ LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by the RPC context tagger.
+type RPCTagInfo struct {
+ // FullMethodName is the RPC method in the format of /package.service/method.
+ FullMethodName string
+ // FailFast indicates if this RPC is failfast.
+ // This field is only valid on the client side; it is always false on the server side.
+ FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+ // TagRPC can attach some information to the given context.
+ // The context used for the remaining lifetime of the RPC will be derived
+ // from the returned context.
+ TagRPC(context.Context, *RPCTagInfo) context.Context
+ // HandleRPC processes the RPC stats.
+ HandleRPC(context.Context, RPCStats)
+
+ // TagConn can attach some information to the given context.
+ // The returned context will be used for stats handling.
+ // For conn stats handling, the context used in HandleConn for this
+ // connection will be derived from the context returned.
+ // For RPC stats handling,
+ // - On server side, the context used in HandleRPC for all RPCs on this
+ // connection will be derived from the context returned.
+ // - On client side, the context is not derived from the context returned.
+ TagConn(context.Context, *ConnTagInfo) context.Context
+ // HandleConn processes the Conn stats.
+ HandleConn(context.Context, ConnStats)
+}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 000000000..3f13190a0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,296 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
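+//
+// A minimal no-op Handler sketch (illustrative only; see handlers.go for the
+// interface) could look like:
+//
+//  type nopHandler struct{}
+//
+//  func (nopHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
+//  func (nopHandler) HandleRPC(context.Context, stats.RPCStats)                       {}
+//  func (nopHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
+//  func (nopHandler) HandleConn(context.Context, stats.ConnStats)                     {}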
+package stats // import "google.golang.org/grpc/stats" + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. 
+ Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
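+//
+// For example (illustrative), a server-side handler can read the raw
+// grpc-tags-bin payload as:
+//
+//  if b := stats.Tags(ctx); b != nil {
+//    // b is the value the client attached via SetTags.
+//  }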
+func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/go16.go b/vendor/google.golang.org/grpc/status/go16.go new file mode 100644 index 000000000..e59b53e82 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/go16.go @@ -0,0 +1,42 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. 
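+//
+// For illustration, after a deadline expires:
+//
+//  err := status.FromContextError(ctx.Err()).Err() // a codes.DeadlineExceeded error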
+func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/status/go17.go b/vendor/google.golang.org/grpc/status/go17.go new file mode 100644 index 000000000..090215149 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/go17.go @@ -0,0 +1,44 @@ +// +build go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package status + +import ( + "context" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" +) + +// FromContextError converts a context error into a Status. It returns a +// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is +// non-nil and not a context error. +func FromContextError(err error) *Status { + switch err { + case nil: + return New(codes.OK, "") + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return New(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return New(codes.Canceled, err.Error()) + default: + return New(codes.Unknown, err.Error()) + } +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..9c61b0945 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,189 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// statusError is an alias of a status proto. 
It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. 
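+ // Proto returns a clone of the underlying proto, so appending to
+ // p.Details below does not mutate the receiver.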
+ p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..65d45a1d9 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1023 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. 
+ SendMsg(m interface{}) error
+ // Deprecated: See ClientStream and ServerStream documentation instead.
+ RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server, if there is any.
+ // It must only be called after stream.CloseAndRecv has returned, or
+ // stream.Recv has returned a non-nil error (including io.EOF).
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when a non-nil error is encountered.
+ CloseSend() error
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ //   - There is sufficient flow control to schedule m with the transport, or
+ //   - The stream is done, or
+ //   - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
+}
+
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+// 1. Call Close on the ClientConn.
+// 2. Cancel the context provided.
+// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+// client-streaming RPC, for instance, might use the helper function
+// CloseAndRecv (note that CloseSend does not Recv, therefore is not
+// guaranteed to release all resources).
+// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
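+//
+// An illustrative call (hypothetical method name; generated code normally
+// supplies desc):
+//
+//  desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}
+//  stream, err := cc.NewStream(ctx, desc, "/pkg.Service/Watch")
+//  // ... SendMsg/RecvMsg on stream until RecvMsg returns a non-nil error.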
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. 
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } + + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error { + cs.attempt = &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + cs.attempt.t = t + cs.attempt.done = done + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + +// clientStream implements a client side Stream. 
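+//
+// A clientStream owns at most one active csAttempt at a time. Until the
+// attempt is committed (commitAttemptLocked), each operation applied to the
+// stream is also recorded in buffer via bufferForRetryLocked, so that it can
+// be replayed against a fresh attempt (replayBufferLocked) whenever
+// shouldRetry permits a retry.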
+type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + attempt *csAttempt // the active client stream attempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, wait-for-ready, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil { + // Context error; stop now. + return toErr + } else if !to { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. 
+				return err
+			}
+			hasPushback = true
+		} else if len(sps) > 1 {
+			grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+			cs.retryThrottler.throttle() // This counts as a failure for throttling.
+			return err
+		}
+	}
+
+	var code codes.Code
+	if cs.attempt.s != nil {
+		code = cs.attempt.s.Status().Code()
+	} else {
+		code = status.Convert(err).Code()
+	}
+
+	rp := cs.methodConfig.retryPolicy
+	if rp == nil || !rp.retryableStatusCodes[code] {
+		return err
+	}
+
+	// Note: the ordering here is important; we count this as a failure
+	// only if the code matched a retryable code.
+	if cs.retryThrottler.throttle() {
+		return err
+	}
+	if cs.numRetries+1 >= rp.maxAttempts {
+		return err
+	}
+
+	var dur time.Duration
+	if hasPushback {
+		dur = time.Millisecond * time.Duration(pushback)
+		cs.numRetriesSincePushback = 0
+	} else {
+		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.initialBackoff) * fact
+		if max := float64(rp.maxBackoff); cur > max {
+			cur = max
+		}
+		dur = time.Duration(grpcrand.Int63n(int64(cur)))
+		cs.numRetriesSincePushback++
+	}
+
+	// TODO(dfawley): we could eagerly fail here if dur puts us past the
+	// deadline, but unsure if it is worth doing.
+	t := time.NewTimer(dur)
+	select {
+	case <-t.C:
+		cs.numRetries++
+		return nil
+	case <-cs.ctx.Done():
+		t.Stop()
+		return status.FromContextError(cs.ctx.Err()).Err()
+	}
+}
+
+// retryLocked returns nil if a retry was performed and succeeded; otherwise it
+// returns the error with which the RPC should fail.
+func (cs *clientStream) retryLocked(lastErr error) error {
+	for {
+		cs.attempt.finish(lastErr)
+		if err := cs.shouldRetry(lastErr); err != nil {
+			cs.commitAttemptLocked()
+			return err
+		}
+		if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
+			return err
+		}
+		if lastErr = cs.replayBufferLocked(); lastErr == nil {
+			return nil
+		}
+	}
+}
+
+func (cs *clientStream) Context() context.Context {
+	cs.commitAttempt()
+	// No need to lock before using attempt, since we know it is committed and
+	// cannot change.
+	return cs.attempt.s.Context()
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			return op(cs.attempt)
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	var m metadata.MD
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
+	if err != nil {
+		cs.finish(err)
+	}
+	return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	// On RPC failure, we never need to retry, because correct usage requires
+	// that RecvMsg() return a non-nil error before Trailer is called. We
+	// would have retried earlier if necessary.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+ cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + data, err := encode(cs.codec, m) + if err != nil { + return err + } + compData, err := compress(data, cs.cp, cs.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + return cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) +} + +func (cs *clientStream) RecvMsg(m interface{}) error { + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m) + }, cs.commitAttemptLocked) + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { return a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.commitAttemptLocked() + cs.mu.Unlock() + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + if cs.attempt != nil { + cs.attempt.finish(err) + } + // after functions all rely upon having a stream. 
+	if cs.attempt.s != nil {
+		for _, o := range cs.opts {
+			o.after(cs.callInfo)
+		}
+	}
+	cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+	cs := a.cs
+	if EnableTracing {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+		if !cs.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it. finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+	if a.statsHandler != nil {
+		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (a *csAttempt) recvMsg(m interface{}) (err error) {
+	cs := a.cs
+	var inPayload *stats.InPayload
+	if a.statsHandler != nil {
+		inPayload = &stats.InPayload{
+			Client: true,
+		}
+	}
+	if !a.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.dc == nil || a.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				a.dc = nil
+				a.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			a.dc = nil
+		}
+		// Only initialize this state once per stream.
+		a.decompSet = true
+	}
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, inPayload, a.decomp)
+	if err != nil {
+		if err == io.EOF {
+			if statusErr := a.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+	if EnableTracing {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if inPayload != nil {
+		a.statsHandler.HandleRPC(cs.ctx, inPayload)
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+	}
+
+	if a.done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.done(balancer.DoneInfo{
+			Err:           err,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+		})
+	}
+	if a.statsHandler != nil {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.cs.beginTime,
+			EndTime:   time.Now(),
+			Error:     err,
+		}
+		a.statsHandler.HandleRPC(a.cs.ctx, end)
+	}
+	if a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// All errors returned from ServerStream methods are compatible with the
+// status package.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	SendMsg(m interface{}) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m interface{}) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler stats.Handler
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + data, err := encode(ss.codec, m) + if err != nil { + return err + } + compData, err := compress(data, ss.cp, ss.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var inPayload *stats.InPayload + if ss.statsHandler != nil { + inPayload = &stats.InPayload{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if inPayload != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 000000000..22b8fb50d --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the handler functions that are executed at the transport
+// layer of gRPC-Go, along with the information passed to them. Everything here
+// is EXPERIMENTAL.
+package tap
+
+import (
+	"golang.org/x/net/context"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+	// FullMethodName is the full name of the gRPC method (in the format
+	// /package.service/method).
+	FullMethodName string
+	// TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is created
+// on the server side. If it returns a non-nil error, the stream will not be
+// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
+// The client will receive an RPC error "code = Unavailable, desc = stream
+// terminated by RST_STREAM with error code: REFUSED_STREAM".
+//
+// It is intended for situations where you do not want to waste the resources
+// of accepting the new stream (e.g. rate limiting). The content of the
+// returned error is ignored and is not sent back to the client. For other,
+// general-purpose uses, please use interceptors instead.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) rather than
+// in a per-RPC goroutine. Users should therefore NOT perform any blocking or
+// time-consuming work in this handle, or every RPC on the connection will
+// slow down. For the same reason, this handle is not called concurrently by
+// gRPC.
+type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 000000000..c1c96dedc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,113 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"time"
+
+	"golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing bool
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "Service", the last dotted component of
+// the service name.
+func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..8ee619bf6 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.15.0"
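
For orientation, a hedged sketch of a bidirectional-streaming server handler written against the ServerStream and MethodFromServerStream APIs vendored above. The import path "example.com/echo/pb" and the message type pb.Msg are hypothetical stand-ins for generated code, not part of this patch.

package main

import (
	"io"

	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"

	pb "example.com/echo/pb" // hypothetical generated package
)

// chatHandler echoes every received message back to the client. Per the
// ServerStream contract above, RecvMsg returns io.EOF once the client has
// performed a CloseSend.
func chatHandler(srv interface{}, stream grpc.ServerStream) error {
	if method, ok := grpc.MethodFromServerStream(stream); ok {
		grpclog.Infof("serving %s", method) // e.g. "/pb.Echo/Chat"
	}
	for {
		req := new(pb.Msg)
		if err := stream.RecvMsg(req); err != nil {
			if err == io.EOF {
				return nil // clean end of the client's send direction
			}
			return err // aborts the RPC with the error's status
		}
		if err := stream.SendMsg(req); err != nil {
			return err
		}
	}
}

In generated code, a handler like this would be referenced from a StreamDesc with both ClientStreams and ServerStreams set to true.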