From 1efae398f026af32fdd34e1ca904b8dd92d8631c Mon Sep 17 00:00:00 2001
From: Gyu-Ho Lee
Date: Mon, 31 Oct 2016 21:03:34 -0700
Subject: [PATCH] vendor: sync with etcd master + other upstream

---
 glide.lock | 74 +-
 glide.yaml | 35 +-
 vendor/bitbucket.org/ww/goautoneg/autoneg.go | 162 +
 .../go}/storage/acl.go | 0
 vendor/cloud.google.com/go/storage/bucket.go | 489 +++
 .../go}/storage/reader.go | 0
 .../go}/storage/storage.go | 347 +-
 .../go}/storage/writer.go | 0
 .../beorn7/perks/quantile/stream.go | 292 ++
 .../cloudfoundry-incubator/candiedyaml/api.go | 834 -----
 .../candiedyaml/decode.go | 622 ---
 .../candiedyaml/emitter.go | 2072 ----------
 .../candiedyaml/encode.go | 395 --
 .../candiedyaml/parser.go | 1230 ------
 .../candiedyaml/reader.go | 465 ---
 .../candiedyaml/resolver.go | 449 ---
 .../candiedyaml/run_parser.go | 62 -
 .../candiedyaml/scanner.go | 3318 -----------------
 .../candiedyaml/tags.go | 360 --
 .../candiedyaml/writer.go | 128 -
 .../candiedyaml/yaml_definesh.go | 22 -
 .../candiedyaml/yaml_privateh.go | 891 -----
 .../candiedyaml/yamlh.go | 953 -----
 .../coreos/etcd/auth/authpb/auth.pb.go | 42 +-
 vendor/github.com/coreos/etcd/client/keys.go | 5 +
 .../github.com/coreos/etcd/clientv3/auth.go | 4 +-
 .../coreos/etcd/clientv3/balancer.go | 27 +-
 .../github.com/coreos/etcd/clientv3/client.go | 26 +-
 .../coreos/etcd/clientv3/cluster.go | 2 +-
 .../github.com/coreos/etcd/clientv3/config.go | 3 -
 vendor/github.com/coreos/etcd/clientv3/doc.go | 2 +-
 vendor/github.com/coreos/etcd/clientv3/kv.go | 3 +-
 .../github.com/coreos/etcd/clientv3/lease.go | 2 +-
 .../github.com/coreos/etcd/clientv3/logger.go | 40 +-
 .../github.com/coreos/etcd/clientv3/retry.go | 55 +-
 .../github.com/coreos/etcd/clientv3/watch.go | 554 ++-
 .../etcdserver/etcdserverpb/etcdserver.pb.go | 57 +-
 .../etcdserverpb/raft_internal.pb.go | 111 +-
 .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 427 +--
 .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 44 +-
 .../github.com/cpuguy83/go-md2man/LICENSE.md | 21 +
 .../cpuguy83/go-md2man/md2man/md2man.go | 19 +
 .../cpuguy83/go-md2man/md2man/roff.go | 269 ++
 vendor/github.com/dustin/go-humanize/comma.go | 2 +-
 .../github.com/dustin/go-humanize/commaf.go | 40 -
 vendor/github.com/dustin/go-humanize/si.go | 11 +-
 vendor/github.com/dustin/go-humanize/times.go | 71 +-
 vendor/github.com/ghodss/yaml/yaml.go | 8 +-
 vendor/github.com/gogo/protobuf/LICENSE | 6 +-
 .../github.com/gogo/protobuf/gogoproto/doc.go | 6 +-
 .../gogo/protobuf/gogoproto/gogo.pb.go | 146 +-
 .../gogo/protobuf/gogoproto/helper.go | 4 +-
 .../github.com/gogo/protobuf/proto/clone.go | 14 +-
 .../github.com/gogo/protobuf/proto/decode.go | 25 +-
 .../gogo/protobuf/proto/decode_gogo.go | 6 +-
 .../github.com/gogo/protobuf/proto/encode.go | 52 +-
 .../gogo/protobuf/proto/encode_gogo.go | 6 +-
 .../github.com/gogo/protobuf/proto/equal.go | 26 +-
 .../gogo/protobuf/proto/extensions.go | 409 +-
 .../gogo/protobuf/proto/extensions_gogo.go | 88 +-
 vendor/github.com/gogo/protobuf/proto/lib.go | 4 -
 .../gogo/protobuf/proto/lib_gogo.go | 6 +-
 .../gogo/protobuf/proto/message_set.go | 43 +-
 .../gogo/protobuf/proto/pointer_reflect.go | 7 +-
 .../gogo/protobuf/proto/pointer_unsafe.go | 6 +-
 .../protobuf/proto/pointer_unsafe_gogo.go | 19 +-
 .../gogo/protobuf/proto/properties.go | 53 +-
 .../gogo/protobuf/proto/properties_gogo.go | 6 +-
 .../gogo/protobuf/proto/skip_gogo.go | 6 +-
 vendor/github.com/gogo/protobuf/proto/text.go | 36 +-
 .../gogo/protobuf/proto/text_gogo.go | 6 +-
 .../gogo/protobuf/proto/text_parser.go | 75 +-
 .../descriptor/descriptor.pb.go | 382 +-
 ...descriptor_gostring.gen.go => gostring.go} | 73 +-
 .../protoc-gen-gogo/descriptor/helper.go | 4 +-
 .../golang/protobuf/jsonpb/jsonpb.go | 67 +-
 .../golang/protobuf/proto/decode.go | 5 -
 .../golang/protobuf/proto/extensions.go | 31 -
 .../github.com/golang/protobuf/proto/lib.go | 2 +-
 .../github.com/golang/protobuf/proto/text.go | 2 +-
 .../golang/protobuf/proto/text_parser.go | 11 -
 .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 +
 .../go-grpc-prometheus/client.go | 72 +
 .../go-grpc-prometheus/client_reporter.go | 111 +
 .../go-grpc-prometheus/server.go | 74 +
 .../go-grpc-prometheus/server_reporter.go | 157 +
 .../grpc-ecosystem/go-grpc-prometheus/util.go | 27 +
 .../grpc-gateway/runtime/marshal_jsonpb.go | 6 +-
 .../golang_protobuf_extensions}/LICENSE | 3 +-
 .../pbutil/decode.go | 75 +
 .../golang_protobuf_extensions/pbutil/doc.go | 16 +
 .../pbutil/encode.go | 46 +
 .../client_golang}/LICENSE | 25 +
 .../prometheus/client_golang/NOTICE | 28 +
 .../client_golang/prometheus/collector.go | 75 +
 .../client_golang/prometheus/counter.go | 175 +
 .../client_golang/prometheus/desc.go | 201 +
 .../client_golang/prometheus/doc.go | 109 +
 .../client_golang/prometheus/expvar.go | 119 +
 .../client_golang/prometheus/gauge.go | 147 +
 .../client_golang/prometheus/go_collector.go | 263 ++
 .../client_golang/prometheus/histogram.go | 450 +++
 .../client_golang/prometheus/http.go | 361 ++
 .../client_golang/prometheus/metric.go | 166 +
 .../prometheus/process_collector.go | 142 +
 .../client_golang/prometheus/push.go | 65 +
 .../client_golang/prometheus/registry.go | 726 ++++
 .../client_golang/prometheus/summary.go | 540 +++
 .../client_golang/prometheus/untyped.go | 145 +
 .../client_golang/prometheus/value.go | 234 ++
 .../client_golang/prometheus/vec.go | 247 ++
 .../prometheus/client_model/LICENSE | 201 +
 .../github.com/prometheus/client_model/NOTICE | 5 +
 .../prometheus/client_model/go/metrics.pb.go | 364 ++
 vendor/github.com/prometheus/common/LICENSE | 201 +
 vendor/github.com/prometheus/common/NOTICE | 5 +
 .../prometheus/common/expfmt/decode.go | 411 ++
 .../prometheus/common/expfmt/encode.go | 88 +
 .../prometheus/common/expfmt/expfmt.go | 40 +
 .../prometheus/common/expfmt/fuzz.go | 36 +
 .../prometheus/common/expfmt/json_decode.go | 162 +
 .../prometheus/common/expfmt/text_create.go | 305 ++
 .../prometheus/common/expfmt/text_parse.go | 746 ++++
 .../prometheus/common/model/alert.go | 109 +
 .../prometheus/common/model/fingerprinting.go | 105 +
 .../prometheus/common/model/labels.go | 188 +
 .../prometheus/common/model/labelset.go | 153 +
 .../prometheus/common/model/metric.go | 81 +
 .../prometheus/common/model/model.go | 16 +
 .../prometheus/common/model/signature.go | 190 +
 .../prometheus/common/model/silence.go | 60 +
 .../prometheus/common/model/time.go | 230 ++
 .../prometheus/common/model/value.go | 395 ++
 vendor/github.com/prometheus/procfs/LICENSE | 201 +
 vendor/github.com/prometheus/procfs/NOTICE | 7 +
 vendor/github.com/prometheus/procfs/doc.go | 45 +
 vendor/github.com/prometheus/procfs/fs.go | 40 +
 vendor/github.com/prometheus/procfs/ipvs.go | 223 ++
 vendor/github.com/prometheus/procfs/mdstat.go | 158 +
 vendor/github.com/prometheus/procfs/proc.go | 202 +
 .../github.com/prometheus/procfs/proc_io.go | 54 +
 .../prometheus/procfs/proc_limits.go | 111 +
 .../github.com/prometheus/procfs/proc_stat.go | 175 +
 vendor/github.com/prometheus/procfs/stat.go | 55 +
 .../russross/blackfriday/LICENSE.txt | 29 +
 .../github.com/russross/blackfriday/block.go | 1398 +++++++
 .../github.com/russross/blackfriday/html.go | 949 +++++
 .../github.com/russross/blackfriday/inline.go | 1133 ++++++
 .../github.com/russross/blackfriday/latex.go | 332 ++
 .../russross/blackfriday/markdown.go | 926 +++++
 .../russross/blackfriday/smartypants.go | 400 ++
 .../shurcooL/sanitized_anchor_name/LICENSE | 19 +
 .../shurcooL/sanitized_anchor_name/main.go | 29 +
 .../spf13/cobra/bash_completions.go | 388 +-
 vendor/github.com/spf13/cobra/cobra.go | 63 +-
 vendor/github.com/spf13/cobra/command.go | 321 +-
 .../github.com/spf13/cobra/command_notwin.go | 5 -
 vendor/github.com/spf13/cobra/command_win.go | 26 -
 vendor/github.com/spf13/cobra/doc_util.go | 34 +
 vendor/github.com/spf13/cobra/man_docs.go | 227 ++
 vendor/github.com/spf13/cobra/md_docs.go | 162 +
 vendor/github.com/spf13/pflag/bool.go | 7 +-
 vendor/github.com/spf13/pflag/count.go | 7 +-
 vendor/github.com/spf13/pflag/flag.go | 147 +-
 vendor/github.com/spf13/pflag/float32.go | 7 +-
 vendor/github.com/spf13/pflag/float64.go | 7 +-
 vendor/github.com/spf13/pflag/golangflag.go | 7 -
 vendor/github.com/spf13/pflag/int.go | 7 +-
 vendor/github.com/spf13/pflag/int32.go | 7 +-
 vendor/github.com/spf13/pflag/int64.go | 7 +-
 vendor/github.com/spf13/pflag/int8.go | 7 +-
 vendor/github.com/spf13/pflag/string.go | 4 +-
 vendor/github.com/spf13/pflag/string_array.go | 110 -
 vendor/github.com/spf13/pflag/string_slice.go | 31 +-
 vendor/github.com/spf13/pflag/uint.go | 7 +-
 vendor/github.com/spf13/pflag/uint16.go | 9 +-
 vendor/github.com/spf13/pflag/uint32.go | 11 +-
 vendor/github.com/spf13/pflag/uint64.go | 7 +-
 vendor/github.com/spf13/pflag/uint8.go | 7 +-
 vendor/github.com/ugorji/go/codec/0doc.go | 14 +-
 vendor/github.com/ugorji/go/codec/binc.go | 6 +-
 vendor/github.com/ugorji/go/codec/cbor.go | 3 +-
 vendor/github.com/ugorji/go/codec/decode.go | 26 +-
 vendor/github.com/ugorji/go/codec/encode.go | 151 +-
 .../ugorji/go/codec/fast-path.generated.go | 574 +--
 .../ugorji/go/codec/fast-path.not.go | 2 -
 .../ugorji/go/codec/gen-helper.generated.go | 2 +-
 .../ugorji/go/codec/gen.generated.go | 5 +-
 vendor/github.com/ugorji/go/codec/gen.go | 195 +-
 vendor/github.com/ugorji/go/codec/gen_15.go | 12 -
 vendor/github.com/ugorji/go/codec/gen_16.go | 12 -
 vendor/github.com/ugorji/go/codec/helper.go | 271 +-
 .../ugorji/go/codec/helper_internal.go | 4 +-
 .../ugorji/go/codec/helper_unsafe.go | 14 +-
 vendor/github.com/ugorji/go/codec/json.go | 171 +-
 vendor/github.com/ugorji/go/codec/msgpack.go | 3 +-
 vendor/github.com/ugorji/go/codec/rpc.go | 2 +-
 vendor/github.com/ugorji/go/codec/simple.go | 3 +-
 vendor/github.com/ugorji/go/codec/time.go | 13 +-
 vendor/golang.org/x/net/context/context.go | 295 +-
 .../x/net/context/ctxhttp/cancelreq.go | 19 +
 .../x/net/context/ctxhttp/cancelreq_go14.go | 23 +
 .../x/net/context/ctxhttp/ctxhttp.go | 102 +-
 .../x/net/context/ctxhttp/ctxhttp_pre17.go | 147 -
 vendor/golang.org/x/net/context/go17.go | 72 -
 vendor/golang.org/x/net/context/pre_go17.go | 300 --
 .../x/net/http2/client_conn_pool.go | 35 +-
 .../x/net/http2/configure_transport.go | 11 +-
 vendor/golang.org/x/net/http2/errors.go | 8 -
 vendor/golang.org/x/net/http2/frame.go | 67 +-
 vendor/golang.org/x/net/http2/go15.go | 11 +
 vendor/golang.org/x/net/http2/go16.go | 43 -
 vendor/golang.org/x/net/http2/go17.go | 94 -
 vendor/golang.org/x/net/http2/hpack/hpack.go | 2 +-
 .../golang.org/x/net/http2/hpack/huffman.go | 42 +-
 vendor/golang.org/x/net/http2/http2.go | 167 +-
 vendor/golang.org/x/net/http2/not_go15.go | 11 +
 vendor/golang.org/x/net/http2/not_go16.go | 35 +-
 vendor/golang.org/x/net/http2/not_go17.go | 51 -
 vendor/golang.org/x/net/http2/pipe.go | 6 -
 vendor/golang.org/x/net/http2/server.go | 269 +-
 vendor/golang.org/x/net/http2/transport.go | 686 +---
 vendor/golang.org/x/net/http2/write.go | 23 +-
 .../golang.org/x/net/lex/httplex/httplex.go | 312 --
 vendor/golang.org/x/net/trace/trace.go | 14 +-
 vendor/google.golang.org/cloud/cloud.go | 56 -
 .../google.golang.org/cloud/internal/cloud.go | 128 -
 .../cloud/internal/transport/dial.go | 61 -
 vendor/google.golang.org/cloud/option.go | 88 -
 vendor/google.golang.org/grpc/balancer.go | 20 +-
 vendor/google.golang.org/grpc/call.go | 11 +-
 vendor/google.golang.org/grpc/clientconn.go | 127 +-
 .../grpc/credentials/credentials.go | 49 +-
 vendor/google.golang.org/grpc/interceptor.go | 16 +
 .../grpc/metadata/metadata.go | 13 +-
 vendor/google.golang.org/grpc/rpc_util.go | 4 +-
 vendor/google.golang.org/grpc/server.go | 26 +-
 vendor/google.golang.org/grpc/stream.go | 33 +-
 .../grpc/transport/handler_server.go | 4 +-
 .../grpc/transport/http2_client.go | 100 +-
 .../grpc/transport/http2_server.go | 12 +-
 .../grpc/transport/http_util.go | 13 +-
 .../grpc/transport/transport.go | 31 +-
 vendor/gopkg.in/yaml.v2/LICENSE | 195 +-
 vendor/gopkg.in/yaml.v2/readerc.go | 7 +-
 vendor/gopkg.in/yaml.v2/scannerc.go | 2 +-
 vendor/gopkg.in/yaml.v2/yaml.go | 2 +-
 247 files changed, 20630 insertions(+), 18578 deletions(-)
 create mode 100644 vendor/bitbucket.org/ww/goautoneg/autoneg.go
 rename vendor/{google.golang.org/cloud => cloud.google.com/go}/storage/acl.go (100%)
 create mode 100644 vendor/cloud.google.com/go/storage/bucket.go
 rename vendor/{google.golang.org/cloud => cloud.google.com/go}/storage/reader.go (100%)
 rename vendor/{google.golang.org/cloud => cloud.google.com/go}/storage/storage.go (73%)
 rename vendor/{google.golang.org/cloud => cloud.google.com/go}/storage/writer.go (100%)
 create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/LICENSE.md
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
 delete mode 100644 vendor/github.com/dustin/go-humanize/commaf.go
 rename vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/{descriptor_gostring.gen.go => gostring.go} (92%)
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
 rename vendor/{google.golang.org/cloud => github.com/matttproud/golang_protobuf_extensions}/LICENSE (99%)
 create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
 create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
 create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
 rename vendor/github.com/{cloudfoundry-incubator/candiedyaml => prometheus/client_golang}/LICENSE (89%)
 create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go
 create mode 100644 vendor/github.com/prometheus/client_model/LICENSE
 create mode 100644 vendor/github.com/prometheus/client_model/NOTICE
 create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go
 create mode 100644 vendor/github.com/prometheus/common/LICENSE
 create mode 100644 vendor/github.com/prometheus/common/NOTICE
 create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/json_decode.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go
 create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go
 create mode 100644 vendor/github.com/prometheus/common/model/alert.go
 create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go
 create mode 100644 vendor/github.com/prometheus/common/model/labels.go
 create mode 100644 vendor/github.com/prometheus/common/model/labelset.go
 create mode 100644 vendor/github.com/prometheus/common/model/metric.go
 create mode 100644 vendor/github.com/prometheus/common/model/model.go
 create mode 100644 vendor/github.com/prometheus/common/model/signature.go
 create mode 100644 vendor/github.com/prometheus/common/model/silence.go
 create mode 100644 vendor/github.com/prometheus/common/model/time.go
 create mode 100644 vendor/github.com/prometheus/common/model/value.go
 create mode 100644 vendor/github.com/prometheus/procfs/LICENSE
 create mode 100644 vendor/github.com/prometheus/procfs/NOTICE
 create mode 100644 vendor/github.com/prometheus/procfs/doc.go
 create mode 100644 vendor/github.com/prometheus/procfs/fs.go
 create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go
 create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go
 create mode 100644 vendor/github.com/prometheus/procfs/proc.go
 create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go
 create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go
 create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go
 create mode 100644 vendor/github.com/prometheus/procfs/stat.go
 create mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt
 create mode 100644 vendor/github.com/russross/blackfriday/block.go
 create mode 100644 vendor/github.com/russross/blackfriday/html.go
 create mode 100644 vendor/github.com/russross/blackfriday/inline.go
 create mode 100644 vendor/github.com/russross/blackfriday/latex.go
 create mode 100644 vendor/github.com/russross/blackfriday/markdown.go
 create mode 100644 vendor/github.com/russross/blackfriday/smartypants.go
 create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE
 create mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go
 delete mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
 delete mode 100644 vendor/github.com/spf13/cobra/command_win.go
 create mode 100644 vendor/github.com/spf13/cobra/doc_util.go
 create mode 100644 vendor/github.com/spf13/cobra/man_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/md_docs.go
 delete mode 100644 vendor/github.com/spf13/pflag/string_array.go
 delete mode 100644 vendor/github.com/ugorji/go/codec/gen_15.go
 delete mode 100644 vendor/github.com/ugorji/go/codec/gen_16.go
 create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq.go
 create mode 100644 vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
 delete mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
 delete mode 100644 vendor/golang.org/x/net/context/go17.go
 delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go
 create mode 100644 vendor/golang.org/x/net/http2/go15.go
 delete mode 100644 vendor/golang.org/x/net/http2/go16.go
 delete mode 100644 vendor/golang.org/x/net/http2/go17.go
 create mode 100644 vendor/golang.org/x/net/http2/not_go15.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go17.go
 delete mode 100644 vendor/golang.org/x/net/lex/httplex/httplex.go
 delete mode 100644 vendor/google.golang.org/cloud/cloud.go
 delete mode 100644 vendor/google.golang.org/cloud/internal/cloud.go
 delete mode 100644 vendor/google.golang.org/cloud/internal/transport/dial.go
 delete mode 100644 vendor/google.golang.org/cloud/option.go

diff --git a/glide.lock b/glide.lock
index e040b136..572c519b 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,6 +1,8 @@
-hash: 1fdae15de84bd36c00be69c38a6471bd77375bb6200160c7c4b4acbeea1f4778
-updated: 2016-09-24T11:38:22.859090033-07:00
+hash: 08d405c0ee123a271c5f6e06676d450f48f89becaebea18514e87f4d49b0d937
+updated: 2016-10-31T21:02:43.335772536-07:00
 imports:
+- name: bitbucket.org/ww/goautoneg
+  version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
 - name: bitbucket.org/zombiezen/gopdf
   version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
   subpackages:
@@ -10,14 +12,19 @@ imports:
   subpackages:
   - compute/metadata
   - internal
+  - storage
 - name: github.com/ajstarks/svgo
   version: 672fe547df4e49efc6db67a74391368bcb149b37
+- name: github.com/beorn7/perks
+  version: b965b613227fddccbfffe13eae360ed3fa822f8d
+  subpackages:
+  - quantile
 - name: github.com/cheggaaa/pb
   version: 6e9d17711bb763b26b68b3931d47f24c1323abab
 - name: github.com/cloudfoundry-incubator/candiedyaml
   version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
 - name: github.com/coreos/etcd
-  version: f5b9238a3c9f10e5bad42626896182451e34a1a9
+  version: 136c02da71430e60fc9e04a1e033b95e7c3376c8
   subpackages:
   - auth/authpb
   - client
@@ -29,19 +36,23 @@ imports:
   - pkg/tlsutil
   - pkg/types
 - name: github.com/coreos/go-systemd
-  version: 5c49e4850c879a0ddc061e8f4adcf307de8a8bc2
+  version: bfdc81d0d7e0fb19447b08571f63b774495251ce
   subpackages:
   - journal
 - name: github.com/coreos/pkg
-  version: 3ac0863d7acf3bc44daf49afef8919af12f704ef
+  version: fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
   subpackages:
   - capnslog
+- name: github.com/cpuguy83/go-md2man
+  version: 71acacd42f85e5e82f70a55327789582a5200a90
+  subpackages:
+  - md2man
 - name: github.com/dustin/go-humanize
-  version: 2fcb5204cdc65b4bec9fd0a87606bb0d0e3c54e8
+  version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
 - name: github.com/ghodss/yaml
-  version: aa0c862057666179de291b67d9f093d12b5a8473
+  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 - name: github.com/gogo/protobuf
-  version: 966a6f4b3274f2692aa2f30df2ea5c7172c832ca
+  version: e18d7aa8f8c624c915db340349aad4c49b10d173
   subpackages:
   - gogoproto
   - proto
@@ -52,7 +63,7 @@ imports:
   - raster
   - truetype
 - name: github.com/golang/protobuf
-  version: f592bd283e9ef86337a432eb50e592278c3d534d
+  version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67
   subpackages:
   - jsonpb
   - proto
@@ -75,8 +86,10 @@ imports:
   - vg/vgimg
   - vg/vgpdf
   - vg/vgsvg
+- name: github.com/grpc-ecosystem/go-grpc-prometheus
+  version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 - name: github.com/grpc-ecosystem/grpc-gateway
-  version: 5e0e028ba0a015710eaebf6e47af18812c9f2767
+  version: f52d055dc48aec25854ed7d31862f78913cf17d1
   subpackages:
   - runtime
   - runtime/internal
@@ -106,18 +119,41 @@ imports:
   - draw2dimg
 - name: github.com/mattn/go-runewidth
   version: d6bea18f789704b5f83375793155289da36a3c7f
+- name: github.com/matttproud/golang_protobuf_extensions
+  version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+  subpackages:
+  - pbutil
 - name: github.com/olekukonko/tablewriter
   version: daf2955e742cf123959884fdff4685aa79b63135
+- name: github.com/prometheus/client_golang
+  version: e51041b3fa41cece0dca035740ba6411905be473
+  subpackages:
+  - prometheus
+- name: github.com/prometheus/client_model
+  version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+  subpackages:
+  - go
+- name: github.com/prometheus/common
+  version: ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650
+  subpackages:
+  - expfmt
+  - model
+- name: github.com/prometheus/procfs
+  version: 454a56f35412459b5e684fd5ec0f9211b94f002a
+- name: github.com/russross/blackfriday
+  version: 300106c228d52c8941d4b3de6054a6062a86dda3
 - name: github.com/samuel/go-zookeeper
   version: 87e1bca4477a3cc767ca71be023ced183d74e538
   subpackages:
   - zk
+- name: github.com/shurcooL/sanitized_anchor_name
+  version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
 - name: github.com/spf13/cobra
-  version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744
+  version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
 - name: github.com/spf13/pflag
-  version: c7e63cf4530bcd3ba943729cee0efeff2ebea63f
+  version: 08b1a584251b5b62f458943640fc8ebd4d50aaa5
 - name: github.com/ugorji/go
-  version: 4a1cb5252a6951f715a85d0e4be334c2a2dbf2a2
+  version: f1f1a805ed361a0e078bb537e4ea78cd37dcf065
   subpackages:
   - codec
 - name: golang.org/x/image
@@ -130,7 +166,7 @@ imports:
   - tiff
   - tiff/lzw
 - name: golang.org/x/net
-  version: 7394c112eae4dba7e96bfcfe738e6373d61772b4
+  version: 6acef71eb69611914f7a30939ea9f6e194c78172
   subpackages:
   - context
   - context/ctxhttp
@@ -170,14 +206,8 @@ imports:
   - internal/urlfetch
   - socket
   - urlfetch
-- name: google.golang.org/cloud
-  version: 12aa462581208c155e498dc13e14cabe6da24dc3
-  subpackages:
-  - internal
-  - internal/transport
-  - storage
 - name: google.golang.org/grpc
-  version: 231b4cfea0e79843053a33f5fe90bd4d84b23cd3
+  version: b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21
   subpackages:
   - codes
   - credentials
@@ -189,5 +219,5 @@ imports:
   - peer
   - transport
 - name: gopkg.in/yaml.v2
-  version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
+  version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
 testImports: []
diff --git a/glide.yaml b/glide.yaml
index c2fb4c99..7811aeaa 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -9,6 +9,7 @@ import:
   subpackages:
   - compute/metadata
   - internal
+  - storage
 - package: github.com/ajstarks/svgo
   version: 672fe547df4e49efc6db67a74391368bcb149b37
 - package: github.com/cheggaaa/pb
@@ -16,7 +17,7 @@ import:
 - package: github.com/cloudfoundry-incubator/candiedyaml
   version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
 - package: github.com/coreos/etcd
-  version: f5b9238a3c9f10e5bad42626896182451e34a1a9
+  version: 136c02da71430e60fc9e04a1e033b95e7c3376c8
   subpackages:
   - auth/authpb
   - client
@@ -28,19 +29,19 @@ import:
   - pkg/tlsutil
   - pkg/types
 - package: github.com/coreos/go-systemd
-  version: 5c49e4850c879a0ddc061e8f4adcf307de8a8bc2
+  version: bfdc81d0d7e0fb19447b08571f63b774495251ce
   subpackages:
   - journal
 - package: github.com/coreos/pkg
-  version: 3ac0863d7acf3bc44daf49afef8919af12f704ef
+  version: fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8
   subpackages:
   - capnslog
 - package: github.com/dustin/go-humanize
-  version: 2fcb5204cdc65b4bec9fd0a87606bb0d0e3c54e8
+  version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
 - package: github.com/ghodss/yaml
-  version: aa0c862057666179de291b67d9f093d12b5a8473
+  version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 - package: github.com/gogo/protobuf
-  version: 966a6f4b3274f2692aa2f30df2ea5c7172c832ca
+  version: e18d7aa8f8c624c915db340349aad4c49b10d173
   subpackages:
   - gogoproto
   - proto
@@ -51,7 +52,7 @@ import:
   - raster
   - truetype
 - package: github.com/golang/protobuf
-  version: f592bd283e9ef86337a432eb50e592278c3d534d
+  version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67
   subpackages:
   - jsonpb
   - proto
@@ -75,7 +76,7 @@ import:
   - vg/vgpdf
   - vg/vgsvg
 - package: github.com/grpc-ecosystem/grpc-gateway
-  version: 5e0e028ba0a015710eaebf6e47af18812c9f2767
+  version: f52d055dc48aec25854ed7d31862f78913cf17d1
   subpackages:
   - runtime
   - runtime/internal
@@ -112,11 +113,11 @@ import:
   subpackages:
   - zk
 - package: github.com/spf13/cobra
-  version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744
+  version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
 - package: github.com/spf13/pflag
-  version: c7e63cf4530bcd3ba943729cee0efeff2ebea63f
+  version: 08b1a584251b5b62f458943640fc8ebd4d50aaa5
 - package: github.com/ugorji/go
-  version: 4a1cb5252a6951f715a85d0e4be334c2a2dbf2a2
+  version: f1f1a805ed361a0e078bb537e4ea78cd37dcf065
   subpackages:
   - codec
 - package: golang.org/x/image
@@ -129,7 +130,7 @@ import:
   - tiff
   - tiff/lzw
 - package: golang.org/x/net
-  version: 7394c112eae4dba7e96bfcfe738e6373d61772b4
+  version: 6acef71eb69611914f7a30939ea9f6e194c78172
   subpackages:
   - context
   - context/ctxhttp
@@ -165,14 +166,8 @@ import:
   - internal/log
   - internal/modules
   - internal/remote_api
-- package: google.golang.org/cloud
-  version: 12aa462581208c155e498dc13e14cabe6da24dc3
-  subpackages:
-  - internal
-  - internal/transport
-  - storage
 - package: google.golang.org/grpc
-  version: 231b4cfea0e79843053a33f5fe90bd4d84b23cd3
+  version: v1.0.2
   subpackages:
   - codes
   - credentials
@@ -184,4 +179,4 @@ import:
   - peer
   - transport
 - package: gopkg.in/yaml.v2
-  version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
+  version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 00000000..648b38cb
--- /dev/null
+++ b/vendor/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/google.golang.org/cloud/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go similarity index 100% rename from vendor/google.golang.org/cloud/storage/acl.go rename to vendor/cloud.google.com/go/storage/acl.go diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 00000000..305e9dda --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,489 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + req := b.c.raw.Buckets.Insert(projectID, bkt) + _, err := req.Context(ctx).Do() + return err +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) error { + req := b.c.raw.Buckets.Delete(b.name) + return req.Context(ctx).Do() +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (c *BucketHandle) ACL() *ACLHandle { + return c.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (c *BucketHandle) DefaultObjectACL() *ACLHandle { + return c.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: &ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + }, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { + resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do() + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +type BucketAttrs struct { + // Name is the name of the bucket. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + MetaGeneration int64 + + // StorageClass is the storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". 
+ StorageClass string + + // Created is the creation time of the bucket. + Created time.Time +} + +func newBucket(b *raw.Bucket) *BucketAttrs { + if b == nil { + return nil + } + bucket := &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + var acl []*raw.BucketAccessControl + if len(b.ACL) > 0 { + acl = make([]*raw.BucketAccessControl, len(b.ACL)) + for i, rule := range b.ACL { + acl[i] = &raw.BucketAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + dACL := toRawObjectACL(b.DefaultObjectACL) + return &raw.Bucket{ + Name: b.Name, + DefaultObjectAcl: dACL, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: acl, + } +} + +// ObjectList represents a list of objects returned from a bucket List call. +type ObjectList struct { + // Results represent a list of object results. + Results []*ObjectAttrs + + // Next is the continuation query to retrieve more + // results with the same filtering criteria. If there + // are no more results to retrieve, it is nil. + Next *Query + + // Prefixes represents prefixes of objects + // matching-but-not-listed up to and including + // the requested delimiter. + Prefixes []string +} + +// List lists objects from the bucket. You can specify a query +// to filter the results. If q is nil, no filtering is applied. +// +// Deprecated. Use BucketHandle.Objects instead. +func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) { + it := b.Objects(ctx, q) + attrs, pres, err := it.NextPage() + if err != nil && err != Done { + return nil, err + } + objects := &ObjectList{ + Results: attrs, + Prefixes: pres, + } + if it.NextPageToken() != "" { + objects.Next = &it.query + } + return objects, nil +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageSize int + objs []*ObjectAttrs + prefixes []string + err error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a +// performance hint to affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +// +// If Query.Delimiter is non-empty, Next returns an error. Use NextPage when using delimiters. 
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if it.query.Delimiter != "" { + return nil, errors.New("cannot use ObjectIterator.Next with a delimiter") + } + for len(it.objs) == 0 { // "for", not "if", to handle empty pages + if it.err != nil { + return nil, it.err + } + it.nextPage() + if it.err != nil { + it.objs = nil + return nil, it.err + } + if it.query.Cursor == "" { + it.err = Done + } + } + o := it.objs[0] + it.objs = it.objs[1:] + return o, nil +} + +// DefaultPageSize is the default page size for calls to an iterator's NextPage method. +const DefaultPageSize = 1000 + +// NextPage returns the next page of results, both objects (as *ObjectAttrs) +// and prefixes. Prefixes will be nil if query.Delimiter is empty. +// +// NextPage will return exactly the number of results (the total of objects and +// prefixes) specified by the last call to SetPageSize, unless there are not +// enough results available. If no page size was specified, or was set to less +// than 1, it uses DefaultPageSize. +// +// NextPage may return a second return value of Done along with the last page +// of results. +// +// After NextPage returns Done, all subsequent calls to NextPage will return +// (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *ObjectIterator) NextPage() (objs []*ObjectAttrs, prefixes []string, err error) { + defer it.SetPageSize(it.pageSize) // restore value at entry + if it.pageSize <= 0 { + it.pageSize = DefaultPageSize + } + for len(objs)+len(prefixes) < it.pageSize { + it.pageSize -= len(objs) + len(prefixes) + it.nextPage() + if it.err != nil { + return nil, nil, it.err + } + objs = append(objs, it.objs...) + it.objs = nil + prefixes = append(prefixes, it.prefixes...) + if it.query.Cursor == "" { + it.err = Done + return objs, prefixes, it.err + } + } + return objs, prefixes, nil +} + +// nextPage gets the next page of results by making a single call to the underlying method. +// It sets it.objs, it.prefixes, it.query.Cursor, and it.err. It never sets it.err to Done. +func (it *ObjectIterator) nextPage() { + if it.err != nil { + return + } + req := it.bucket.c.raw.Objects.List(it.bucket.name) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(it.query.Cursor) + if it.pageSize > 0 { + req.MaxResults(int64(it.pageSize)) + } + resp, err := req.Context(it.ctx).Do() + if err != nil { + it.err = err + return + } + it.query.Cursor = resp.NextPageToken + for _, item := range resp.Items { + it.objs = append(it.objs, newObject(item)) + } + it.prefixes = resp.Prefixes +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +// NextPage will return exactly this many items if they are present. +func (it *ObjectIterator) SetPageSize(pageSize int) { + it.pageSize = pageSize +} + +// SetPageToken sets the page token for the next call to NextPage, to resume +// the iteration from a previous point. +func (it *ObjectIterator) SetPageToken(t string) { + it.query.Cursor = t +} + +// NextPageToken returns a page token that can be used with SetPageToken to +// resume iteration from the next page. It returns the empty string if there +// are no more pages. For an example, see SetPageToken. +func (it *ObjectIterator) NextPageToken() string { + return it.query.Cursor +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. 
You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + return &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + pageSize int + pageToken string + buckets []*BucketAttrs + err error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +// +// Internally, Next retrieves results in bulk. You can call SetPageSize as a +// performance hint to affect how many results are retrieved in a single RPC. +// +// SetPageToken should not be called when using Next. +// +// Next and NextPage should not be used with the same iterator. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + for len(it.buckets) == 0 { // "for", not "if", to handle empty pages + if it.err != nil { + return nil, it.err + } + it.nextPage() + if it.err != nil { + it.buckets = nil + return nil, it.err + } + if it.pageToken == "" { + it.err = Done + } + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// NextPage returns the next page of results. +// +// NextPage will return exactly the number of results specified by the last +// call to SetPageSize, unless fewer results remain. If no page size was +// specified, or was set to less than 1, it uses DefaultPageSize. +// +// NextPage may return a second return value of Done along with the last page +// of results. +// +// After NextPage returns Done, all subsequent calls to NextPage will return +// (nil, Done). +// +// Next and NextPage should not be used with the same iterator. +func (it *BucketIterator) NextPage() (buckets []*BucketAttrs, err error) { + defer it.SetPageSize(it.pageSize) // restore value at entry + if it.pageSize <= 0 { + it.pageSize = DefaultPageSize + } + for len(buckets) < it.pageSize { + it.pageSize -= len(buckets) + it.nextPage() + if it.err != nil { + return nil, it.err + } + buckets = append(buckets, it.buckets...) + it.buckets = nil + if it.pageToken == "" { + it.err = Done + return buckets, it.err + } + } + return buckets, nil +} + +// nextPage gets the next page of results by making a single call to the +// underlying method. It sets it.buckets, it.pageToken, and it.err. It never +// sets it.err to Done. +// +// Note that the underlying service is free to return less than pageSize items. +// It can even return none. +func (it *BucketIterator) nextPage() { + if it.err != nil { + return + } + req := it.client.raw.Buckets.List(it.projectID) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(it.pageToken) + if it.pageSize > 0 { + req.MaxResults(int64(it.pageSize)) + } + resp, err := req.Context(it.ctx).Do() + if err != nil { + it.err = err + return + } + it.pageToken = resp.NextPageToken + for _, item := range resp.Items { + it.buckets = append(it.buckets, newBucket(item)) + } +} + +// SetPageSize sets the page size for all subsequent calls to NextPage. +// NextPage will return exactly this many items if they are present. 
+func (it *BucketIterator) SetPageSize(pageSize int) { + it.pageSize = pageSize +} + +// SetPageToken sets the page token for the next call to NextPage, to resume +// the iteration from a previous point. +func (it *BucketIterator) SetPageToken(t string) { + it.pageToken = t +} + +// NextPageToken returns a page token that can be used with SetPageToken to +// resume iteration from the next page. It returns the empty string if there +// are no more pages. For an example, see SetPageToken. +func (it *BucketIterator) NextPageToken() string { + return it.pageToken +} diff --git a/vendor/google.golang.org/cloud/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go similarity index 100% rename from vendor/google.golang.org/cloud/storage/reader.go rename to vendor/cloud.google.com/go/storage/reader.go diff --git a/vendor/google.golang.org/cloud/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go similarity index 73% rename from vendor/google.golang.org/cloud/storage/storage.go rename to vendor/cloud.google.com/go/storage/storage.go index 85dca803..ae6c36f5 100644 --- a/vendor/google.golang.org/cloud/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -15,7 +15,7 @@ // Package storage contains a Google Cloud Storage client. // // This package is experimental and may make backwards-incompatible changes. -package storage // import "google.golang.org/cloud/storage" +package storage // import "cloud.google.com/go/storage" import ( "bytes" @@ -38,8 +38,8 @@ import ( "time" "unicode/utf8" - "google.golang.org/cloud" - "google.golang.org/cloud/internal/transport" + "google.golang.org/api/option" + "google.golang.org/api/transport" "golang.org/x/net/context" "google.golang.org/api/googleapi" @@ -82,7 +82,7 @@ type AdminClient struct { // NewAdminClient creates a new AdminClient for a given project. // // Deprecated: use NewClient instead. -func NewAdminClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*AdminClient, error) { +func NewAdminClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*AdminClient, error) { c, err := NewClient(ctx, opts...) if err != nil { return nil, err @@ -120,11 +120,11 @@ type Client struct { } // NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use cloud.WithScopes. -func NewClient(ctx context.Context, opts ...cloud.ClientOption) (*Client, error) { - o := []cloud.ClientOption{ - cloud.WithScopes(ScopeFullControl), - cloud.WithUserAgent(userAgent), +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), } opts = append(o, opts...) hc, _, err := transport.NewHTTPClient(ctx, opts...) @@ -179,237 +179,6 @@ func (c *Client) Bucket(name string) *BucketHandle { } } -// Create creates the Bucket in the project. -// If attrs is nil the API defaults will be used. -func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = b.name - req := b.c.raw.Buckets.Insert(projectID, bkt) - _, err := req.Context(ctx).Do() - return err -} - -// Delete deletes the Bucket. 
-func (b *BucketHandle) Delete(ctx context.Context) error { - req := b.c.raw.Buckets.Delete(b.name) - return req.Context(ctx).Do() -} - -// ACL returns an ACLHandle, which provides access to the bucket's access control list. -// This controls who can list, create or overwrite the objects in a bucket. -// This call does not perform any network operations. -func (c *BucketHandle) ACL() *ACLHandle { - return c.acl -} - -// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. -// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. -// This call does not perform any network operations. -func (c *BucketHandle) DefaultObjectACL() *ACLHandle { - return c.defaultObjectACL -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. The full specification -// for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming -func (b *BucketHandle) Object(name string) *ObjectHandle { - return &ObjectHandle{ - c: b.c, - bucket: b.name, - object: name, - acl: &ACLHandle{ - c: b.c, - bucket: b.name, - object: name, - }, - } -} - -// TODO(jbd): Add storage.buckets.list. -// TODO(jbd): Add storage.buckets.update. - -// TODO(jbd): Add storage.objects.watch. - -// Attrs returns the metadata for the bucket. -func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { - resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp), nil -} - -// List lists objects from the bucket. You can specify a query -// to filter the results. If q is nil, no filtering is applied. -// -// Deprecated. Use BucketHandle.Objects instead. -func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) { - it := b.Objects(ctx, q) - attrs, pres, err := it.NextPage() - if err != nil && err != Done { - return nil, err - } - objects := &ObjectList{ - Results: attrs, - Prefixes: pres, - } - if it.NextPageToken() != "" { - objects.Next = &it.query - } - return objects, nil -} - -func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - it := &ObjectIterator{ - ctx: ctx, - bucket: b, - } - if q != nil { - it.query = *q - } - return it -} - -type ObjectIterator struct { - ctx context.Context - bucket *BucketHandle - query Query - pageSize int32 - objs []*ObjectAttrs - prefixes []string - err error -} - -// Next returns the next result. Its second return value is Done if there are -// no more results. Once Next returns Done, all subsequent calls will return -// Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a -// performance hint to affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. -// -// If Query.Delimiter is non-empty, Next returns an error. Use NextPage when using delimiters. 
-func (it *ObjectIterator) Next() (*ObjectAttrs, error) { - if it.query.Delimiter != "" { - return nil, errors.New("cannot use ObjectIterator.Next with a delimiter") - } - for len(it.objs) == 0 { // "for", not "if", to handle empty pages - if it.err != nil { - return nil, it.err - } - it.nextPage() - if it.err != nil { - it.objs = nil - return nil, it.err - } - if it.query.Cursor == "" { - it.err = Done - } - } - o := it.objs[0] - it.objs = it.objs[1:] - return o, nil -} - -const DefaultPageSize = 1000 - -// NextPage returns the next page of results, both objects (as *ObjectAttrs) -// and prefixes. Prefixes will be nil if query.Delimiter is empty. -// -// NextPage will return exactly the number of results (the total of objects and -// prefixes) specified by the last call to SetPageSize, unless there are not -// enough results available. If no page size was specified, it uses -// DefaultPageSize. -// -// NextPage may return a second return value of Done along with the last page -// of results. -// -// After NextPage returns Done, all subsequent calls to NextPage will return -// (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *ObjectIterator) NextPage() (objs []*ObjectAttrs, prefixes []string, err error) { - defer it.SetPageSize(it.pageSize) // restore value at entry - if it.pageSize <= 0 { - it.pageSize = DefaultPageSize - } - for len(objs)+len(prefixes) < int(it.pageSize) { - it.pageSize -= int32(len(objs) + len(prefixes)) - it.nextPage() - if it.err != nil { - return nil, nil, it.err - } - objs = append(objs, it.objs...) - prefixes = append(prefixes, it.prefixes...) - if it.query.Cursor == "" { - it.err = Done - return objs, prefixes, it.err - } - } - return objs, prefixes, it.err -} - -// nextPage gets the next page of results by making a single call to the underlying method. -// It sets it.objs, it.prefixes, it.query.Cursor, and it.err. It never sets it.err to Done. -func (it *ObjectIterator) nextPage() { - if it.err != nil { - return - } - req := it.bucket.c.raw.Objects.List(it.bucket.name) - req.Projection("full") - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.Versions(it.query.Versions) - req.PageToken(it.query.Cursor) - if it.pageSize > 0 { - req.MaxResults(int64(it.pageSize)) - } - resp, err := req.Context(it.ctx).Do() - if err != nil { - it.err = err - return - } - it.query.Cursor = resp.NextPageToken - it.objs = nil - for _, item := range resp.Items { - it.objs = append(it.objs, newObject(item)) - } - it.prefixes = resp.Prefixes -} - -// SetPageSize sets the page size for all subsequent calls to NextPage. -// NextPage will return exactly this many items if they are present. -func (it *ObjectIterator) SetPageSize(pageSize int32) { - it.pageSize = pageSize -} - -// SetPageToken sets the page token for the next call to NextPage, to resume -// the iteration from a previous point. -func (it *ObjectIterator) SetPageToken(t string) { - it.query.Cursor = t -} - -// NextPageToken returns a page token that can be used with SetPageToken to -// resume iteration from the next page. It returns the empty string if there -// are no more pages. For an example, see SetPageToken. -func (it *ObjectIterator) NextPageToken() string { - return it.query.Cursor -} - // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. 
@@ -794,64 +563,6 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) { return parsed, nil } -// BucketAttrs represents the metadata for a Google Cloud Storage bucket. -type BucketAttrs struct { - // Name is the name of the bucket. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // Location is the location of the bucket. It defaults to "US". - Location string - - // MetaGeneration is the metadata generation of the bucket. - MetaGeneration int64 - - // StorageClass is the storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". - StorageClass string - - // Created is the creation time of the bucket. - Created time.Time -} - -func newBucket(b *raw.Bucket) *BucketAttrs { - if b == nil { - return nil - } - bucket := &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - } - acl := make([]ACLRule, len(b.Acl)) - for i, rule := range b.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.ACL = acl - objACL := make([]ACLRule, len(b.DefaultObjectAcl)) - for i, rule := range b.DefaultObjectAcl { - objACL[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.DefaultObjectACL = objACL - return bucket -} - func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { var acl []*raw.ObjectAccessControl if len(oldACL) > 0 { @@ -866,28 +577,6 @@ func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { return acl } -// toRawBucket copies the editable attribute from b to the raw library's Bucket type. -func (b *BucketAttrs) toRawBucket() *raw.Bucket { - var acl []*raw.BucketAccessControl - if len(b.ACL) > 0 { - acl = make([]*raw.BucketAccessControl, len(b.ACL)) - for i, rule := range b.ACL { - acl[i] = &raw.BucketAccessControl{ - Entity: string(rule.Entity), - Role: string(rule.Role), - } - } - } - dACL := toRawObjectACL(b.DefaultObjectACL) - return &raw.Bucket{ - Name: b.Name, - DefaultObjectAcl: dACL, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: acl, - } -} - // toRawObject copies the editable attributes from o to the raw library's Object type. func (o ObjectAttrs) toRawObject(bucket string) *raw.Object { acl := toRawObjectACL(o.ACL) @@ -1075,22 +764,6 @@ type Query struct { MaxResults int } -// ObjectList represents a list of objects returned from a bucket List call. -type ObjectList struct { - // Results represent a list of object results. - Results []*ObjectAttrs - - // Next is the continuation query to retrieve more - // results with the same filtering criteria. If there - // are no more results to retrieve, it is nil. - Next *Query - - // Prefixes represents prefixes of objects - // matching-but-not-listed up to and including - // the requested delimiter. - Prefixes []string -} - // contentTyper implements ContentTyper to enable an // io.ReadCloser to specify its MIME type. 
type contentTyper struct { @@ -1202,3 +875,5 @@ func (c objectsGetCall) IfMetagenerationMatch(gen int64) { func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) { appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen)) } + +// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/google.golang.org/cloud/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go similarity index 100% rename from vendor/google.golang.org/cloud/storage/writer.go rename to vendor/cloud.google.com/go/storage/writer.go diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..587b1fc5 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,292 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. 
The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for quantile, epsilon := range targets {
+			if quantile*s.n <= r {
+				f = (2 * epsilon * r) / quantile
+			} else {
+				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(float64(l) * q)
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unit tests show that the merging is inaccurate. Find out how to
+	// do merges properly.
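Taken together, the exported surface added above (NewTargeted, Insert, Query) composes as in this short usage sketch; the merge internals continue below. The target map and input values are illustrative only:

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median within 0.05 and the 99th percentile within 0.001,
	// matching the absolute-error semantics documented on NewTargeted.
	s := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 0; i < 10000; i++ {
		s.Insert(rand.Float64())
	}
	fmt.Println("p50 ~", s.Query(0.5))
	fmt.Println("p99 ~", s.Query(0.99))
}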
+ var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go deleted file mode 100644 index 87c1043e..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go +++ /dev/null @@ -1,834 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Create a new parser object. - */ - -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE), - buffer: make([]byte, 0, INPUT_BUFFER_SIZE), - } - - return true -} - -/* - * Destroy a parser object. - */ -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -/* - * String read handler. - */ - -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - - n := copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -/* - * File read handler. - */ - -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - return parser.input_reader.Read(buffer) -} - -/* - * Set a string input. 
- */ - -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_string_read_handler - - parser.input = input - parser.input_pos = 0 -} - -/* - * Set a reader input - */ -func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_file_read_handler - parser.input_reader = reader -} - -/* - * Set a generic input. - */ - -func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = handler -} - -/* - * Set the source encoding. - */ - -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - parser.encoding = encoding -} - -/* - * Create a new emitter object. - */ - -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, OUTPUT_BUFFER_SIZE), - raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE), - states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE), - events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE), - } -} - -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -/* - * String write handler. - */ - -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -/* - * File write handler. - */ - -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -/* - * Set a string output. - */ - -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = buffer -} - -/* - * Set a file output. - */ - -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -/* - * Set a generic output handler. - */ - -func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = handler -} - -/* - * Set the output encoding. - */ - -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - emitter.encoding = encoding -} - -/* - * Set the canonical output style. - */ - -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -/* - * Set the indentation increment. - */ - -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -/* - * Set the preferred line width. - */ - -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -/* - * Set if unescaped non-ASCII characters are allowed. 
- */ - -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -/* - * Set the preferred line break character. - */ - -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -/* - * Destroy a token object. - */ - -// yaml_DECLARE(void) -// yaml_token_delete(yaml_token_t *token) -// { -// assert(token); /* Non-NULL token object expected. */ -// -// switch (token.type) -// { -// case yaml_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case yaml_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case yaml_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case yaml_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case yaml_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -// } - -/* - * Check if a string is a valid UTF-8 sequence. - * - * Check 'reader.c' for more details on UTF-8 encoding. - */ - -// static int -// yaml_check_utf8(yaml_char_t *start, size_t length) -// { -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -// } - -/* - * Create STREAM-START. - */ - -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -/* - * Create STREAM-END. - */ - -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_END_EVENT, - } -} - -/* - * Create DOCUMENT-START. - */ - -func yaml_document_start_event_initialize(event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -/* - * Create DOCUMENT-END. - */ - -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -/* - * Create ALIAS. - */ - -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) { - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - anchor: anchor, - } -} - -/* - * Create SCALAR. 
- */ - -func yaml_scalar_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, - value []byte, - plain_implicit bool, quoted_implicit bool, - style yaml_scalar_style_t) { - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-START. - */ - -func yaml_sequence_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-END. - */ - -func yaml_sequence_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - } -} - -/* - * Create MAPPING-START. - */ - -func yaml_mapping_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create MAPPING-END. - */ - -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - } -} - -/* - * Destroy an event object. - */ - -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -// /* -// * Create a document object. -// */ -// -// func yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives []yaml_tag_directive_t, -// start_implicit, end_implicit bool) bool { -// -// -// { -// struct { -// YAML_error_type_t error; -// } context; -// struct { -// yaml_node_t *start; -// yaml_node_t *end; -// yaml_node_t *top; -// } nodes = { NULL, NULL, NULL }; -// yaml_version_directive_t *version_directive_copy = NULL; -// struct { -// yaml_tag_directive_t *start; -// yaml_tag_directive_t *end; -// yaml_tag_directive_t *top; -// } tag_directives_copy = { NULL, NULL, NULL }; -// yaml_tag_directive_t value = { NULL, NULL }; -// YAML_mark_t mark = { 0, 0, 0 }; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)); -// /* Valid tag directives are expected. 
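Although the commented-out C document helpers in this region never made it to Go, the live event API deleted above is complete. A package-internal sketch of emitting a single scalar document (these identifiers are unexported, and yaml_PLAIN_SCALAR_STYLE is assumed from the libyaml-derived naming used elsewhere in the package):

func emitHello(out *[]byte) bool {
	var emitter yaml_emitter_t
	yaml_emitter_initialize(&emitter)
	yaml_emitter_set_output_string(&emitter, out)

	var event yaml_event_t
	// STREAM-START, DOCUMENT-START, one SCALAR, DOCUMENT-END, STREAM-END.
	yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING)
	if !yaml_emitter_emit(&emitter, &event) {
		return false
	}
	yaml_document_start_event_initialize(&event, nil, nil, true)
	if !yaml_emitter_emit(&emitter, &event) {
		return false
	}
	yaml_scalar_event_initialize(&event, nil, nil, []byte("hello"), true, false, yaml_PLAIN_SCALAR_STYLE)
	if !yaml_emitter_emit(&emitter, &event) {
		return false
	}
	yaml_document_end_event_initialize(&event, true)
	if !yaml_emitter_emit(&emitter, &event) {
		return false
	}
	yaml_stream_end_event_initialize(&event)
	return yaml_emitter_emit(&emitter, &event)
}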
*/ -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error; -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)); -// if (!version_directive_copy) goto error; -// version_directive_copy.major = version_directive.major; -// version_directive_copy.minor = version_directive.minor; -// } -// -// if (tag_directives_start != tag_directives_end) { -// yaml_tag_directive_t *tag_directive; -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error; -// for (tag_directive = tag_directives_start; -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle); -// assert(tag_directive.prefix); -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error; -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error; -// value.handle = yaml_strdup(tag_directive.handle); -// value.prefix = yaml_strdup(tag_directive.prefix); -// if (!value.handle || !value.prefix) goto error; -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error; -// value.handle = NULL; -// value.prefix = NULL; -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark); -// -// return 1; -// -// error: -// STACK_DEL(&context, nodes); -// yaml_free(version_directive_copy); -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// yaml_tag_directive_t value = POP(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// } -// STACK_DEL(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// -// return 0; -// } -// -// /* -// * Destroy a document object. -// */ -// -// yaml_DECLARE(void) -// yaml_document_delete(document *yaml_document_t) -// { -// struct { -// YAML_error_type_t error; -// } context; -// yaml_tag_directive_t *tag_directive; -// -// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */ -// -// assert(document); /* Non-NULL document object is expected. */ -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// yaml_node_t node = POP(&context, document.nodes); -// yaml_free(node.tag); -// switch (node.type) { -// case yaml_SCALAR_NODE: -// yaml_free(node.data.scalar.value); -// break; -// case yaml_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items); -// break; -// case yaml_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs); -// break; -// default: -// assert(0); /* Should not happen. */ -// } -// } -// STACK_DEL(&context, document.nodes); -// -// yaml_free(document.version_directive); -// for (tag_directive = document.tag_directives.start; -// tag_directive != document.tag_directives.end; -// tag_directive++) { -// yaml_free(tag_directive.handle); -// yaml_free(tag_directive.prefix); -// } -// yaml_free(document.tag_directives.start); -// -// memset(document, 0, sizeof(yaml_document_t)); -// } -// -// /** -// * Get a document node. -// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_node(document *yaml_document_t, int index) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1; -// } -// return NULL; -// } -// -// /** -// * Get the root object. 
-// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_root_node(document *yaml_document_t) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start; -// } -// return NULL; -// } -// -// /* -// * Add a scalar node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_scalar(document *yaml_document_t, -// yaml_char_t *tag, yaml_char_t *value, int length, -// yaml_scalar_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// yaml_char_t *value_copy = NULL; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert(value); /* Non-NULL value is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (length < 0) { -// length = strlen((char *)value); -// } -// -// if (!yaml_check_utf8(value, length)) goto error; -// value_copy = yaml_malloc(length+1); -// if (!value_copy) goto error; -// memcpy(value_copy, value, length); -// value_copy[length] = '\0'; -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// yaml_free(tag_copy); -// yaml_free(value_copy); -// -// return 0; -// } -// -// /* -// * Add a sequence node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_sequence(document *yaml_document_t, -// yaml_char_t *tag, yaml_sequence_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_item_t *start; -// yaml_node_item_t *end; -// yaml_node_item_t *top; -// } items = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error; -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, items); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Add a mapping node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_mapping(document *yaml_document_t, -// yaml_char_t *tag, yaml_mapping_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_pair_t *start; -// yaml_node_pair_t *end; -// yaml_node_pair_t *top; -// } pairs = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. 
*/ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error; -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, pairs); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Append an item to a sequence node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_sequence_item(document *yaml_document_t, -// int sequence, int item) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// assert(document); /* Non-NULL document is required. */ -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top); -// /* Valid sequence id is required. */ -// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE); -// /* A sequence node is required. */ -// assert(item > 0 && document.nodes.start + item <= document.nodes.top); -// /* Valid item id is required. */ -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0; -// -// return 1; -// } -// -// /* -// * Append a pair of a key and a value to a mapping node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_mapping_pair(document *yaml_document_t, -// int mapping, int key, int value) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// yaml_node_pair_t pair; -// -// assert(document); /* Non-NULL document is required. */ -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top); -// /* Valid mapping id is required. */ -// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE); -// /* A mapping node is required. */ -// assert(key > 0 && document.nodes.start + key <= document.nodes.top); -// /* Valid key id is required. */ -// assert(value > 0 && document.nodes.start + value <= document.nodes.top); -// /* Valid value id is required. */ -// -// pair.key = key; -// pair.value = value; -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0; -// -// return 1; -// } -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go deleted file mode 100644 index dcc1b89c..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go +++ /dev/null @@ -1,622 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "runtime" - "strconv" - "strings" -) - -type Unmarshaler interface { - UnmarshalYAML(tag string, value interface{}) error -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. 
-func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -type Decoder struct { - parser yaml_parser_t - event yaml_event_t - replay_events []yaml_event_t - useNumber bool - - anchors map[string][]yaml_event_t - tracking_anchors [][]yaml_event_t -} - -type ParserError struct { - ErrorType YAML_error_type_t - Context string - ContextMark YAML_mark_t - Problem string - ProblemMark YAML_mark_t -} - -func (e *ParserError) Error() string { - return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1) -} - -type UnexpectedEventError struct { - Value string - EventType yaml_event_type_t - At YAML_mark_t -} - -func (e *UnexpectedEventError) Error() string { - return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1) -} - -func recovery(err *error) { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - - var tmpError error - switch r := r.(type) { - case error: - tmpError = r - case string: - tmpError = errors.New(r) - default: - tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String()) - } - - *err = tmpError - } -} - -func Unmarshal(data []byte, v interface{}) error { - d := NewDecoder(bytes.NewBuffer(data)) - return d.Decode(v) -} - -func NewDecoder(r io.Reader) *Decoder { - d := &Decoder{ - anchors: make(map[string][]yaml_event_t), - tracking_anchors: make([][]yaml_event_t, 1), - } - yaml_parser_initialize(&d.parser) - yaml_parser_set_input_reader(&d.parser, r) - return d -} - -func (d *Decoder) Decode(v interface{}) (err error) { - defer recovery(&err) - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark) - } - - if d.event.event_type == yaml_NO_EVENT { - d.nextEvent() - - if d.event.event_type != yaml_STREAM_START_EVENT { - return errors.New("Invalid stream") - } - - d.nextEvent() - } - - d.document(rv) - return nil -} - -func (d *Decoder) UseNumber() { d.useNumber = true } - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) nextEvent() { - if d.event.event_type == yaml_STREAM_END_EVENT { - d.error(errors.New("The stream is closed")) - } - - if d.replay_events != nil { - d.event = d.replay_events[0] - if len(d.replay_events) == 1 { - d.replay_events = nil - } else { - d.replay_events = d.replay_events[1:] - } - } else { - if !yaml_parser_parse(&d.parser, &d.event) { - yaml_event_delete(&d.event) - - d.error(&ParserError{ - ErrorType: d.parser.error, - Context: d.parser.context, - ContextMark: d.parser.context_mark, - Problem: d.parser.problem, - ProblemMark: d.parser.problem_mark, - }) - } - } - - last := len(d.tracking_anchors) - // skip aliases when tracking an anchor - if last > 0 && d.event.event_type != yaml_ALIAS_EVENT { - d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event) - } -} - -func (d *Decoder) document(rv reflect.Value) { - if d.event.event_type != yaml_DOCUMENT_START_EVENT { - d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark)) - } - - d.nextEvent() - d.parse(rv) - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.error(fmt.Errorf("Expected 
document end at %s", d.event.start_mark)) - } - - d.nextEvent() -} - -func (d *Decoder) parse(rv reflect.Value) { - if !rv.IsValid() { - // skip ahead since we cannot store - d.valueInterface() - return - } - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - d.sequence(rv) - d.end_anchor(anchor) - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - d.mapping(rv) - d.end_anchor(anchor) - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - d.scalar(rv) - d.end_anchor(anchor) - case yaml_ALIAS_EVENT: - d.alias(rv) - case yaml_DOCUMENT_END_EVENT: - default: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - } -} - -func (d *Decoder) begin_anchor(anchor string) { - if anchor != "" { - events := []yaml_event_t{d.event} - d.tracking_anchors = append(d.tracking_anchors, events) - } -} - -func (d *Decoder) end_anchor(anchor string) { - if anchor != "" { - events := d.tracking_anchors[len(d.tracking_anchors)-1] - d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1] - // remove the anchor, replaying events shouldn't have anchors - events[0].anchor = nil - // we went one too many, remove the extra event - events = events[:len(events)-1] - // if nested, append to all the other anchors - for i, e := range d.tracking_anchors { - d.tracking_anchors[i] = append(e, events...) - } - d.anchors[anchor] = events - } -} - -func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - var temp interface{} - return u, reflect.ValueOf(&temp) - } - } - - v = v.Elem() - } - - return nil, v -} - -func (d *Decoder) sequence(v reflect.Value) { - if d.event.event_type != yaml_SEQUENCE_START_EVENT { - d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark)) - } - - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.sequenceInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark)) - case reflect.Array: - case reflect.Slice: - break - } - - d.nextEvent() - - i := 0 -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - // Get element of array, growing if necessary. 
- if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.parse(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.parse(reflect.Value{}) - } - i++ - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } -} - -func (d *Decoder) mapping(v reflect.Value) { - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.mappingInterface())) - return - } - - // Check type of target: struct or map[X]Y - switch v.Kind() { - case reflect.Struct: - d.mappingStruct(v) - return - case reflect.Map: - default: - d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark)) - } - - mapt := v.Type() - if v.IsNil() { - v.Set(reflect.MakeMap(mapt)) - } - - d.nextEvent() - - keyt := mapt.Key() - mapElemt := mapt.Elem() - - var mapElem reflect.Value -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := reflect.New(keyt) - d.parse(key.Elem()) - - if !mapElem.IsValid() { - mapElem = reflect.New(mapElemt).Elem() - } else { - mapElem.Set(reflect.Zero(mapElemt)) - } - - d.parse(mapElem) - - v.SetMapIndex(key.Elem(), mapElem) - } - - d.nextEvent() -} - -func (d *Decoder) mappingStruct(v reflect.Value) { - - structt := v.Type() - fields := cachedTypeFields(structt) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := "" - d.parse(reflect.ValueOf(&key)) - - // Figure out field corresponding to key. 
- var subv reflect.Value - - var f *field - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - - if f != nil { - subv = v - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - d.parse(subv) - } - - d.nextEvent() -} - -func (d *Decoder) scalar(v reflect.Value) { - val := string(d.event.value) - wantptr := null_values[val] - - u, pv := d.indirect(v, wantptr) - - var tag string - if u != nil { - defer func() { - if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil { - d.error(err) - } - }() - - _, pv = d.indirect(pv, wantptr) - } - v = pv - - var err error - tag, err = resolve(d.event, v, d.useNumber) - if err != nil { - d.error(err) - } - - d.nextEvent() -} - -func (d *Decoder) alias(rv reflect.Value) { - val, ok := d.anchors[string(d.event.anchor)] - if !ok { - d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark)) - } - - d.replay_events = val - d.nextEvent() - d.parse(rv) -} - -func (d *Decoder) valueInterface() interface{} { - var v interface{} - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - v = d.sequenceInterface() - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - v = d.mappingInterface() - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - v = d.scalarInterface() - case yaml_ALIAS_EVENT: - rv := reflect.ValueOf(&v) - d.alias(rv) - return v - case yaml_DOCUMENT_END_EVENT: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - - } - d.end_anchor(anchor) - - return v -} - -func (d *Decoder) scalarInterface() interface{} { - _, v := resolveInterface(d.event, d.useNumber) - - d.nextEvent() - return v -} - -// sequenceInterface is like sequence but returns []interface{}. -func (d *Decoder) sequenceInterface() []interface{} { - var v = make([]interface{}, 0) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - v = append(v, d.valueInterface()) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return v -} - -// mappingInterface is like mapping but returns map[interface{}]interface{}. -func (d *Decoder) mappingInterface() map[interface{}]interface{} { - m := make(map[interface{}]interface{}) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - key := d.valueInterface() - - // Read value. - m[key] = d.valueInterface() - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return m -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go deleted file mode 100644 index bd2014f3..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go +++ /dev/null @@ -1,2072 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
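The decoder deleted in this file was driven through Unmarshal or NewDecoder/Decode; lowercase YAML keys match exported struct fields via the EqualFold fallback shown in mappingStruct above. A sketch against that removed API (the Config type and input document are invented for illustration):

package main

import (
	"fmt"

	"github.com/cloudfoundry-incubator/candiedyaml"
)

type Config struct {
	Name  string
	Ports []int
}

func main() {
	input := []byte("name: web\nports:\n- 80\n- 443\n")
	var cfg Config
	// Decode requires a non-nil pointer; Unmarshal wraps NewDecoder + Decode.
	if err := candiedyaml.Unmarshal(input, &cfg); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%+v\n", cfg) // expected: {Name:web Ports:[80 443]}
}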
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -/* - * Flush the buffer if needed. - */ - -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -/* - * Put a character to the output buffer. - */ -func put(emitter *yaml_emitter_t, value byte) bool { - if !flush(emitter) { - return false - } - - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -/* - * Put a line break to the output buffer. - */ - -func put_break(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos++ - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos++ - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 2 - default: - return false - } - emitter.column = 0 - emitter.line++ - return true -} - -/* - * Copy a character from a string into buffer. - */ -func write(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if !flush(emitter) { - return false - } - copy_bytes(emitter.buffer, &emitter.buffer_pos, src, src_pos) - emitter.column++ - return true -} - -/* - * Copy a line break character from a string into buffer. - */ - -func write_break(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if src[*src_pos] == '\n' { - if !put_break(emitter) { - return false - } - *src_pos++ - } else { - if !write(emitter, src, src_pos) { - return false - } - emitter.column = 0 - emitter.line++ - } - - return true -} - -/* - * Set an emitter error and return 0. - */ - -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -/* - * Emit an event. - */ - -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -/* - * Check if we need to accumulate more events before emitting. 
- * - * We accumulate extra - * - 1 event for DOCUMENT-START - * - 2 events for SEQUENCE-START - * - 3 events for MAPPING-START - */ - -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - - accumulate := 0 - switch emitter.events[emitter.events_head].event_type { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - case yaml_MAPPING_START_EVENT: - accumulate = 3 - default: - return false - } - - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - - level := 0 - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].event_type { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - - if level == 0 { - return false - } - } - return true -} - -/* - * Append a directive to the directives stack. - */ - -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, - value *yaml_tag_directive_t, allow_duplicates bool) bool { - - for i := range emitter.tag_directives { - - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicat %TAG directive") - } - } - - tag_copy := yaml_tag_directive_t{ - handle: value.handle, - prefix: value.prefix, - } - - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - - return true -} - -/* - * Increase the indentation level. - */ - -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow bool, indentless bool) bool { - - emitter.indents = append(emitter.indents, emitter.indent) - - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - - return true -} - -/* - * State dispatcher. 
- */ - -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, - "expected nothing after STREAM-END") - - } - - panic("invalid state") -} - -/* - * Expect STREAM-START. - */ - -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected STREAM-START") - } - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - - return true -} - -/* - * Expect DOCUMENT-START or STREAM-END. 
- */ - -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if event.event_type == yaml_DOCUMENT_START_EVENT { - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, - *event.version_directive) { - return false - } - } - - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := range default_tag_directives { - if !yaml_emitter_append_tag_directive(emitter, &default_tag_directives[i], true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if (event.version_directive != nil || len(event.tag_directives) > 0) && - emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - - return true - } else if event.event_type == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_END_STATE - - return true - } - - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-START or STREAM-END") -} - -/* - * Expect the root node. - */ - -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -/* - * Expect DOCUMENT-END. 
- */ - -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-END") - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -/* - * - * Expect a flow item node. - */ - -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte("["), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("]"), false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a flow key node. 
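- *
- * (An illustrative sketch is interleaved below.)
- */
-
-// sketchFlowSequenceEvents is a hypothetical helper added for illustration
-// (not in the original file): the events the flow-sequence states above
-// consume. Fed to an emitter positioned at a node, it prints "[1, 2]".
-func sketchFlowSequenceEvents(em *yaml_emitter_t) bool {
-	var ev yaml_event_t
-	yaml_sequence_start_event_initialize(&ev, nil, nil, true, yaml_FLOW_SEQUENCE_STYLE)
-	if !yaml_emitter_emit(em, &ev) {
-		return false
-	}
-	for _, item := range []string{"1", "2"} {
-		yaml_scalar_event_initialize(&ev, nil, nil, []byte(item), true, true, yaml_PLAIN_SCALAR_STYLE)
-		if !yaml_emitter_emit(em, &ev) {
-			return false
-		}
-	}
-	yaml_sequence_end_event_initialize(&ev)
-	return yaml_emitter_emit(em, &ev)
-}
-
-/*
- * Expect a flow key node.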
- */ - -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - - if !yaml_emitter_write_indicator(emitter, []byte("{"), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("}"), false, false, false) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, false) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a flow value node. - */ - -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a block item node. - */ - -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, - (emitter.mapping_context && !emitter.indention)) { - return false - } - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("-"), true, false, true) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a block key node. 
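- *
- * (An illustrative sketch is interleaved below.)
- */
-
-// sketchBlockMappingEvents is a hypothetical helper added for illustration
-// (not in the original file): events for a one-pair block mapping,
-// exercising the simple-key path below; at the document root this prints
-// "a: 1".
-func sketchBlockMappingEvents(em *yaml_emitter_t) bool {
-	var ev yaml_event_t
-	yaml_mapping_start_event_initialize(&ev, nil, nil, true, yaml_BLOCK_MAPPING_STYLE)
-	if !yaml_emitter_emit(em, &ev) {
-		return false
-	}
-	for _, s := range []string{"a", "1"} { // key, then value
-		yaml_scalar_event_initialize(&ev, nil, nil, []byte(s), true, true, yaml_PLAIN_SCALAR_STYLE)
-		if !yaml_emitter_emit(em, &ev) {
-			return false
-		}
-	}
-	yaml_mapping_end_event_initialize(&ev)
-	return yaml_emitter_emit(em, &ev)
-}
-
-/*
- * Expect a block key node.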
- */ - -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a block value node. - */ - -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a node. - */ - -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.event_type { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - - return false -} - -/* - * Expect ALIAS. - */ - -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SCALAR. 
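- *
- * (A small illustrative sketch follows.)
- */
-
-// sketchAnchorThenAlias is a hypothetical helper added for illustration
-// (not in the original file): it anchors a scalar, then refers back to it
-// through the ALIAS branch above. As two items of a block sequence this
-// renders "- &id hello" followed by "- *id".
-func sketchAnchorThenAlias(em *yaml_emitter_t) bool {
-	var ev yaml_event_t
-	yaml_scalar_event_initialize(&ev, []byte("id"), nil, []byte("hello"), true, true, yaml_PLAIN_SCALAR_STYLE)
-	if !yaml_emitter_emit(em, &ev) {
-		return false
-	}
-	ev = yaml_event_t{event_type: yaml_ALIAS_EVENT, anchor: []byte("id")}
-	return yaml_emitter_emit(em, &ev)
-}
-
-/*
- * Expect SCALAR.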
- */ - -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SEQUENCE-START. - */ - -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_SEQUENCE_STYLE) || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - - return true -} - -/* - * Expect MAPPING-START. - */ - -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_MAPPING_STYLE) || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - - return true -} - -/* - * Check if the document content is an empty scalar. - */ - -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false -} - -/* - * Check if the next events represent an empty sequence. - */ - -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_SEQUENCE_END_EVENT) -} - -/* - * Check if the next events represent an empty mapping. - */ - -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_MAPPING_END_EVENT) -} - -/* - * Check if the next node can be expressed as a simple key. 
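- *
- * (A small illustrative sketch follows.)
- */
-
-// sketchQueueEmptySequence is a hypothetical helper added for illustration
-// (not in the original file): the queue shape the empty-collection checks
-// above look for. With these two events queued,
-// yaml_emitter_check_empty_sequence reports true, so "[]" may be written
-// as a simple key.
-func sketchQueueEmptySequence(em *yaml_emitter_t) {
-	em.events = append(em.events,
-		yaml_event_t{event_type: yaml_SEQUENCE_START_EVENT},
-		yaml_event_t{event_type: yaml_SEQUENCE_END_EVENT})
-}
-
-/*
- * Check if the next node can be expressed as a simple key.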
- */
-
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
-	length := 0
-
-	switch emitter.events[emitter.events_head].event_type {
-	case yaml_ALIAS_EVENT:
-		length += len(emitter.anchor_data.anchor)
-
-	case yaml_SCALAR_EVENT:
-		if emitter.scalar_data.multiline {
-			return false
-		}
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix) +
-			len(emitter.scalar_data.value)
-
-	case yaml_SEQUENCE_START_EVENT:
-		if !yaml_emitter_check_empty_sequence(emitter) {
-			return false
-		}
-
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix)
-
-	case yaml_MAPPING_START_EVENT:
-		if !yaml_emitter_check_empty_mapping(emitter) {
-			return false
-		}
-
-		length += len(emitter.anchor_data.anchor) +
-			len(emitter.tag_data.handle) +
-			len(emitter.tag_data.suffix)
-
-	default:
-		return false
-	}
-
-	if length > 128 {
-		return false
-	}
-
-	return true
-}
-
-/*
- * Determine an acceptable scalar style.
- */
-
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
-
-	if no_tag && !event.implicit && !event.quoted_implicit {
-		return yaml_emitter_set_emitter_error(emitter,
-			"neither tag nor implicit flags are specified")
-	}
-
-	style := yaml_scalar_style_t(event.style)
-
-	if style == yaml_ANY_SCALAR_STYLE {
-		style = yaml_PLAIN_SCALAR_STYLE
-	}
-
-	if emitter.canonical {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-
-	if emitter.simple_key_context && emitter.scalar_data.multiline {
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-
-	if style == yaml_PLAIN_SCALAR_STYLE {
-		if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) ||
-			(emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-		if len(emitter.scalar_data.value) == 0 &&
-			(emitter.flow_level > 0 || emitter.simple_key_context) {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-		if no_tag && !event.implicit {
-			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
-		}
-	}
-
-	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
-		if !emitter.scalar_data.single_quoted_allowed {
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		}
-	}
-
-	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
-		if !emitter.scalar_data.block_allowed ||
-			emitter.flow_level > 0 || emitter.simple_key_context {
-			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-		}
-	}
-
-	if no_tag && !event.quoted_implicit &&
-		style != yaml_PLAIN_SCALAR_STYLE {
-		emitter.tag_data.handle = []byte("!")
-	}
-
-	emitter.scalar_data.style = style
-
-	return true
-}
-
-/*
- * Write an anchor.
- */
-
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
-	if emitter.anchor_data.anchor == nil {
-		return true
-	}
-
-	indicator := "*"
-	if !emitter.anchor_data.alias {
-		indicator = "&"
-	}
-	if !yaml_emitter_write_indicator(emitter, []byte(indicator), true, false, false) {
-		return false
-	}
-
-	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-/*
- * Write a tag.
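- *
- * (An illustrative sketch is interleaved below.)
- */
-
-// sketchSelectStyle is a hypothetical helper added for illustration (not
-// in the original file): it probes which style the rules above pick for a
-// value by running the analyzer from this file first. implicit is set so
-// the missing tag is acceptable; the zero event.style is assumed to be
-// yaml_ANY_SCALAR_STYLE, as in libyaml.
-func sketchSelectStyle(em *yaml_emitter_t, value string) yaml_scalar_style_t {
-	ev := yaml_event_t{implicit: true, value: []byte(value)}
-	yaml_emitter_analyze_scalar(em, ev.value)
-	yaml_emitter_select_scalar_style(em, &ev)
-	return em.scalar_data.style
-}
-
-/*
- * Write a tag.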
- */
-
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
-	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
-		return true
-	}
-
-	if len(emitter.tag_data.handle) > 0 {
-		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
-			return false
-		}
-
-		if len(emitter.tag_data.suffix) > 0 {
-			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
-				return false
-			}
-
-		}
-	} else {
-		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
-			return false
-		}
-
-		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
-			return false
-		}
-
-		if !yaml_emitter_write_indicator(emitter, []byte(">"), false, false, false) {
-			return false
-		}
-
-	}
-
-	return true
-}
-
-/*
- * Write a scalar.
- */
-
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
-	switch emitter.scalar_data.style {
-	case yaml_PLAIN_SCALAR_STYLE:
-		return yaml_emitter_write_plain_scalar(emitter,
-			emitter.scalar_data.value,
-			!emitter.simple_key_context)
-
-	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
-		return yaml_emitter_write_single_quoted_scalar(emitter,
-			emitter.scalar_data.value,
-			!emitter.simple_key_context)
-
-	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
-		return yaml_emitter_write_double_quoted_scalar(emitter,
-			emitter.scalar_data.value,
-			!emitter.simple_key_context)
-
-	case yaml_LITERAL_SCALAR_STYLE:
-		return yaml_emitter_write_literal_scalar(emitter,
-			emitter.scalar_data.value)
-
-	case yaml_FOLDED_SCALAR_STYLE:
-		return yaml_emitter_write_folded_scalar(emitter,
-			emitter.scalar_data.value)
-
-	default:
-		panic("unknown scalar")
-	}
-
-	return false
-}
-
-/*
- * Check if a %YAML directive is valid.
- */
-
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t,
-	version_directive yaml_version_directive_t) bool {
-	if version_directive.major != 1 || version_directive.minor != 1 {
-		return yaml_emitter_set_emitter_error(emitter,
-			"incompatible %YAML directive")
-	}
-
-	return true
-}
-
-/*
- * Check if a %TAG directive is valid.
- */
-
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t,
-	tag_directive *yaml_tag_directive_t) bool {
-	handle := tag_directive.handle
-	prefix := tag_directive.prefix
-
-	if len(handle) == 0 {
-		return yaml_emitter_set_emitter_error(emitter,
-			"tag handle must not be empty")
-	}
-
-	if handle[0] != '!' {
-		return yaml_emitter_set_emitter_error(emitter,
-			"tag handle must start with '!'")
-	}
-
-	if handle[len(handle)-1] != '!' {
-		return yaml_emitter_set_emitter_error(emitter,
-			"tag handle must end with '!'")
-	}
-
-	for i := 1; i < len(handle)-1; i += width(handle[i]) {
-		if !is_alpha(handle[i]) {
-			return yaml_emitter_set_emitter_error(emitter,
-				"tag handle must contain alphanumerical characters only")
-		}
-	}
-
-	if len(prefix) == 0 {
-		return yaml_emitter_set_emitter_error(emitter,
-			"tag prefix must not be empty")
-	}
-
-	return true
-}
-
-/*
- * Check if an anchor is valid.
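- *
- * (A small illustrative sketch follows.)
- */
-
-// sketchValidTagDirective is a hypothetical helper added for illustration
-// (not in the original file): a %TAG directive that passes every check
-// above -- a handle delimited by '!' with an alphanumeric interior, and a
-// non-empty prefix.
-func sketchValidTagDirective() yaml_tag_directive_t {
-	return yaml_tag_directive_t{
-		handle: []byte("!e!"),
-		prefix: []byte("tag:example.com,2016:"),
-	}
-}
-
-/*
- * Check if an anchor is valid.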
- */ - -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, - anchor []byte, alias bool) bool { - if len(anchor) == 0 { - errmsg := "alias value must not be empty" - if !alias { - errmsg = "anchor value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor[i]) { - errmsg := "alias value must contain alphanumerical characters only" - if !alias { - errmsg = "anchor value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - } - - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - - return true -} - -/* - * Check if a tag is valid. - */ - -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag value must not be empty") - } - - for i := range emitter.tag_directives { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - - emitter.tag_data.suffix = tag - - return true -} - -/* - * Check if a scalar is valid. - */ - -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - block_indicators := false - flow_indicators := false - line_breaks := false - special_characters := false - - leading_space := false - leading_break := false - trailing_space := false - trailing_break := false - break_space := false - space_break := false - - preceeded_by_whitespace := false - followed_by_whitespace := false - previous_space := false - previous_break := false - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || - (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blankz_at(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable_at(value, i) || (!is_ascii(value[i]) && !emitter.unicode) { - special_characters = true - } - - if is_break_at(value, i) { - line_breaks = true - } - - if is_space(value[i]) { - if i == 0 { - leading_space = true - } - if i+w == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break_at(value, i) { - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - preceeded_by_whitespace = is_blankz_at(value, i) - } - - emitter.scalar_data.multiline = line_breaks - - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if trailing_space { - emitter.scalar_data.block_allowed = false - } - - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - - return true -} - -/* - * Check if the event data is valid. 
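- *
- * (A small illustrative sketch follows.)
- */
-
-// sketchAnalyzeLeadingSpace is a hypothetical helper added for
-// illustration (not in the original file): it reads the flags the analyzer
-// above computes. A leading space rules out both plain styles while
-// single-quoting stays allowed, so " x" yields (false, true).
-func sketchAnalyzeLeadingSpace(em *yaml_emitter_t) (plainOK, singleQuotedOK bool) {
-	yaml_emitter_analyze_scalar(em, []byte(" x"))
-	return em.scalar_data.flow_plain_allowed, em.scalar_data.single_quoted_allowed
-}
-
-/*
- * Check if the event data is valid.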
- */ - -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.event_type { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - (!event.implicit && - !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - - } - return true -} - -/* - * Write the BOM character. - */ - -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - - pos := emitter.buffer_pos - emitter.buffer[pos] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - - if !emitter.indention || emitter.column > indent || - (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - - emitter.whitespace = true - emitter.indention = true - - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, - indicator []byte, need_whitespace bool, - is_whitespace bool, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - ind_pos := 0 - for ind_pos < len(indicator) { - if !write(emitter, indicator, &ind_pos) { - return false - } - } - - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, - need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - write_it := false - switch value[i] { - case ';', '/', '?', 
':', '@', '&', '=', '+', '$', ',', '_', - '.', '!', '~', '*', '\'', '(', ')', '[', ']': - write_it = true - default: - write_it = is_alpha(value[i]) - } - if write_it { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for j := 0; j < w; j++ { - val := value[i] - i++ - - if !put(emitter, '%') { - return false - } - c := val >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = val & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - } - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !yaml_emitter_write_indicator(emitter, []byte("'"), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("'"), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - - spaces := false - - if !yaml_emitter_write_indicator(emitter, []byte("\""), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable_at(value, i) || (!emitter.unicode && !is_ascii(value[i])) || - is_bom_at(value, i) || is_break_at(value, i) || - value[i] == '"' || value[i] == '\\' { - octet := 
value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - switch v { - case 0x00: - if !put(emitter, '0') { - return false - } - case 0x07: - if !put(emitter, 'a') { - return false - } - case 0x08: - if !put(emitter, 'b') { - return false - } - case 0x09: - if !put(emitter, 't') { - return false - } - - case 0x0A: - if !put(emitter, 'n') { - return false - } - - case 0x0B: - if !put(emitter, 'v') { - return false - } - - case 0x0C: - if !put(emitter, 'f') { - return false - } - - case 0x0D: - if !put(emitter, 'r') { - return false - } - - case 0x1B: - if !put(emitter, 'e') { - return false - } - case 0x22: - if !put(emitter, '"') { - return false - } - case 0x5C: - if !put(emitter, '\\') { - return false - } - case 0x85: - if !put(emitter, 'N') { - return false - } - - case 0xA0: - if !put(emitter, '_') { - return false - } - - case 0x2028: - if !put(emitter, 'L') { - return false - } - - case 0x2029: - if !put(emitter, 'P') { - return false - } - default: - if v <= 0xFF { - if !put(emitter, 'x') { - return false - } - w = 2 - } else if v <= 0xFFFF { - if !put(emitter, 'u') { - return false - } - w = 4 - } else { - if !put(emitter, 'U') { - return false - } - w = 8 - } - for k := (w - 1) * 4; k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - c := digit + '0' - if c > 9 { - c = digit + 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - spaces = false - } else if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value[i+1]) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("\""), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - - if is_space(value[0]) || is_break_at(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - - if !is_break_at(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - for value[i]&0xC0 == 0x80 { - i-- - } - - if is_break_at(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - - breaks := true - - if !yaml_emitter_write_indicator(emitter, []byte("|"), true, false, false) { - return false - } - - if 
!yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - breaks := true - leading_spaces := true - - if !yaml_emitter_write_indicator(emitter, []byte(">"), true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := i - for is_break_at(value, k) { - k += width(value[k]) - } - if !is_blankz_at(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value[i]) - } - if !breaks && is_space(value[i]) && !is_space(value[i+1]) && - emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go deleted file mode 100644 index fd991808..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "io" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "time" -) - -var ( - timeTimeType = reflect.TypeOf(time.Time{}) - marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - numberType = reflect.TypeOf(Number("")) - nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]") - multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029") - - shortTags = map[string]string{ - yaml_NULL_TAG: "!!null", - yaml_BOOL_TAG: "!!bool", - yaml_STR_TAG: "!!str", - yaml_INT_TAG: "!!int", - yaml_FLOAT_TAG: "!!float", - yaml_TIMESTAMP_TAG: "!!timestamp", - yaml_SEQ_TAG: "!!seq", - yaml_MAP_TAG: "!!map", - yaml_BINARY_TAG: "!!binary", - } -) - -type Marshaler interface { - MarshalYAML() (tag string, value interface{}, err error) -} - -// An Encoder writes JSON objects to an output stream. 
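-//
-// A minimal usage sketch, added for illustration (the sentence above was
-// inherited from encoding/json; the stream written here is YAML):
-//
-//	var buf bytes.Buffer
-//	if err := NewEncoder(&buf).Encode(map[string]int{"a": 1}); err != nil {
-//		panic(err)
-//	}
-//	// buf.String() is "a: 1\n"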
-type Encoder struct { - w io.Writer - emitter yaml_emitter_t - event yaml_event_t - flow bool - err error -} - -func Marshal(v interface{}) ([]byte, error) { - b := bytes.Buffer{} - e := NewEncoder(&b) - err := e.Encode(v) - return b.Bytes(), err -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{w: w} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, e.w) - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - - return e -} - -func (e *Encoder) Encode(v interface{}) (err error) { - defer recovery(&err) - - if e.err != nil { - return e.err - } - - e.marshal("", reflect.ValueOf(v), true) - - yaml_document_end_event_initialize(&e.event, true) - e.emit() - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() - - return nil -} - -func (e *Encoder) emit() { - if !yaml_emitter_emit(&e.emitter, &e.event) { - panic("bad emit") - } -} - -func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) { - vt := v.Type() - - if vt.Implements(marshalerType) { - e.emitMarshaler(tag, v) - return - } - - if vt.Kind() != reflect.Ptr && allowAddr { - if reflect.PtrTo(vt).Implements(marshalerType) { - e.emitAddrMarshaler(tag, v) - return - } - } - - switch v.Kind() { - case reflect.Interface: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), allowAddr) - } - case reflect.Map: - e.emitMap(tag, v) - case reflect.Ptr: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), true) - } - case reflect.Struct: - e.emitStruct(tag, v) - case reflect.Slice: - e.emitSlice(tag, v) - case reflect.String: - e.emitString(tag, v) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.emitInt(tag, v) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.emitUint(tag, v) - case reflect.Float32, reflect.Float64: - e.emitFloat(tag, v) - case reflect.Bool: - e.emitBool(tag, v) - default: - panic("Can't marshal type yet: " + v.Type().String()) - } -} - -func (e *Encoder) emitMap(tag string, v reflect.Value) { - e.mapping(tag, func() { - var keys stringValues = v.MapKeys() - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k, true) - e.marshal("", v.MapIndex(k), true) - } - }) -} - -func (e *Encoder) emitStruct(tag string, v reflect.Value) { - if v.Type() == timeTimeType { - e.emitTime(tag, v) - return - } - - fields := cachedTypeFields(v.Type()) - - e.mapping(tag, func() { - for _, f := range fields { - fv := fieldByIndex(v, f.index) - if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { - continue - } - - e.marshal("", reflect.ValueOf(f.name), true) - e.flow = f.flow - e.marshal("", fv, true) - } - }) -} - -func (e *Encoder) emitTime(tag string, v reflect.Value) { - t := v.Interface().(time.Time) - bytes, _ := t.MarshalText() - e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case 
reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func (e *Encoder) mapping(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - f() - - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitSlice(tag string, v reflect.Value) { - if v.Type() == byteSliceType { - e.emitBase64(tag, v) - return - } - - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - n := v.Len() - for i := 0; i < n; i++ { - e.marshal("", v.Index(i), true) - } - - yaml_sequence_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitBase64(tag string, v reflect.Value) { - if v.IsNil() { - e.emitNil() - return - } - - s := v.Bytes() - - dst := make([]byte, base64.StdEncoding.EncodedLen(len(s))) - - base64.StdEncoding.Encode(dst, s) - e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE) -} - -func (e *Encoder) emitString(tag string, v reflect.Value) { - var style yaml_scalar_style_t - s := v.String() - - if nonPrintable.MatchString(s) { - e.emitBase64(tag, v) - return - } - - if v.Type() == numberType { - style = yaml_PLAIN_SCALAR_STYLE - } else { - event := yaml_event_t{ - implicit: true, - value: []byte(s), - } - - rtag, _ := resolveInterface(event, false) - if tag == "" && rtag != yaml_STR_TAG { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if multiline.MatchString(s) { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - } - - e.emitScalar(s, "", tag, style) -} - -func (e *Encoder) emitBool(tag string, v reflect.Value) { - s := strconv.FormatBool(v.Bool()) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitInt(tag string, v reflect.Value) { - s := strconv.FormatInt(v.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitUint(tag string, v reflect.Value) { - s := strconv.FormatUint(v.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitFloat(tag string, v reflect.Value) { - f := v.Float() - - var s string - switch { - case math.IsNaN(f): - s = ".nan" - case math.IsInf(f, 1): - s = "+.inf" - case math.IsInf(f, -1): - s = "-.inf" - default: - s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits()) - } - - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitNil() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - if !implicit { - style = yaml_PLAIN_SCALAR_STYLE - } - - stag := shortTags[tag] - if stag == "" { - stag = tag - } - - yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style) - e.emit() -} - -func (e *Encoder) emitMarshaler(tag string, v reflect.Value) { - if v.Kind() == reflect.Ptr && v.IsNil() { - e.emitNil() - return - } - - m := v.Interface().(Marshaler) - if m == nil { - e.emitNil() - return - } - t, val, err := m.MarshalYAML() - if err != nil { - panic(err) - } - if val == nil { - e.emitNil() - return - } - - e.marshal(t, reflect.ValueOf(val), false) -} - -func (e *Encoder) emitAddrMarshaler(tag string, v 
reflect.Value) {
-	if !v.CanAddr() {
-		e.marshal(tag, v, false)
-		return
-	}
-
-	va := v.Addr()
-	if va.IsNil() {
-		e.emitNil()
-		return
-	}
-
-	m := va.Interface().(Marshaler)
-	t, val, err := m.MarshalYAML()
-	if err != nil {
-		panic(err)
-	}
-
-	if val == nil {
-		e.emitNil()
-		return
-	}
-
-	e.marshal(t, reflect.ValueOf(val), false)
-}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
deleted file mode 100644
index 8d38e306..00000000
--- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
+++ /dev/null
@@ -1,1230 +0,0 @@
-/*
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package candiedyaml
-
-import (
-	"bytes"
-)
-
-/*
- * The parser implements the following grammar:
- *
- * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- * implicit_document ::= block_node DOCUMENT-END*
- * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
- * block_node_or_indentless_sequence ::=
- * ALIAS
- * | properties (block_content | indentless_block_sequence)?
- * | block_content
- * | indentless_block_sequence
- * block_node ::= ALIAS
- * | properties block_content?
- * | block_content
- * flow_node ::= ALIAS
- * | properties flow_content?
- * | flow_content
- * properties ::= TAG ANCHOR? | ANCHOR TAG?
- * block_content ::= block_collection | flow_collection | SCALAR
- * flow_content ::= flow_collection | SCALAR
- * block_collection ::= block_sequence | block_mapping
- * flow_collection ::= flow_sequence | flow_mapping
- * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
- * indentless_sequence ::= (BLOCK-ENTRY block_node?)+
- * block_mapping ::= BLOCK-MAPPING_START
- * ((KEY block_node_or_indentless_sequence?)?
- * (VALUE block_node_or_indentless_sequence?)?)*
- * BLOCK-END
- * flow_sequence ::= FLOW-SEQUENCE-START
- * (flow_sequence_entry FLOW-ENTRY)*
- * flow_sequence_entry?
- * FLOW-SEQUENCE-END
- * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- * flow_mapping ::= FLOW-MAPPING-START
- * (flow_mapping_entry FLOW-ENTRY)*
- * flow_mapping_entry?
- * FLOW-MAPPING-END
- * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- */
-
-/*
- * Peek the next token in the token queue.
- */
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
-	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
-		return &parser.tokens[parser.tokens_head]
-	}
-	return nil
-}
-
-/*
- * Remove the next token from the queue (must be called after peek_token).
- */
-func skip_token(parser *yaml_parser_t) {
-	parser.token_available = false
-	parser.tokens_parsed++
-	parser.stream_end_produced = parser.tokens[parser.tokens_head].token_type == yaml_STREAM_END_TOKEN
-	parser.tokens_head++
-}
-
-/*
- * Get the next event.
- */
-
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
-	/* Erase the event object. */
-	*event = yaml_event_t{}
-
-	/* No events after the end of the stream or error.
*/ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || - parser.state == yaml_PARSE_END_STATE { - return true - } - - /* Generate the next event. */ - - return yaml_parser_state_machine(parser, event) -} - -/* - * Set parser error. - */ - -func yaml_parser_set_parser_error(parser *yaml_parser_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, - context string, context_mark YAML_mark_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -/* - * State dispatcher. - */ - -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
yaml_parser_parse_flow_mapping_value(parser, event, true)
-	}
-
-	panic("invalid parser state")
-}
-
-/*
- * Parse the production:
- * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- *            ************
- */
-
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	if token.token_type != yaml_STREAM_START_TOKEN {
-		return yaml_parser_set_parser_error(parser,
-			"did not find expected <stream-start>", token.start_mark)
-	}
-
-	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
-	*event = yaml_event_t{
-		event_type: yaml_STREAM_START_EVENT,
-		start_mark: token.start_mark,
-		end_mark:   token.end_mark,
-		encoding:   token.encoding,
-	}
-	skip_token(parser)
-
-	return true
-}
-
-/*
- * Parse the productions:
- * implicit_document ::= block_node DOCUMENT-END*
- *                       *
- * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
- *                       *************************
- */
-
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t,
-	implicit bool) bool {
-
-	token := peek_token(parser)
-	if token == nil {
-		return false
-	}
-
-	/* Parse extra document end indicators. */
-
-	if !implicit {
-		for token.token_type == yaml_DOCUMENT_END_TOKEN {
-			skip_token(parser)
-			token = peek_token(parser)
-			if token == nil {
-				return false
-			}
-		}
-	}
-
-	/* Parse an implicit document. */
-
-	if implicit && token.token_type != yaml_VERSION_DIRECTIVE_TOKEN &&
-		token.token_type != yaml_TAG_DIRECTIVE_TOKEN &&
-		token.token_type != yaml_DOCUMENT_START_TOKEN &&
-		token.token_type != yaml_STREAM_END_TOKEN {
-		if !yaml_parser_process_directives(parser, nil, nil) {
-			return false
-		}
-
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
-		*event = yaml_event_t{
-			event_type: yaml_DOCUMENT_START_EVENT,
-			implicit:   true,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-	} else if token.token_type != yaml_STREAM_END_TOKEN {
-		/* Parse an explicit document. */
-		var version_directive *yaml_version_directive_t
-		var tag_directives []yaml_tag_directive_t
-
-		start_mark := token.start_mark
-		if !yaml_parser_process_directives(parser, &version_directive,
-			&tag_directives) {
-			return false
-		}
-		token = peek_token(parser)
-		if token == nil {
-			return false
-		}
-		if token.token_type != yaml_DOCUMENT_START_TOKEN {
-			yaml_parser_set_parser_error(parser,
-				"did not find expected <document start>", token.start_mark)
-			return false
-		}
-
-		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
-		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
-
-		end_mark := token.end_mark
-
-		*event = yaml_event_t{
-			event_type:        yaml_DOCUMENT_START_EVENT,
-			start_mark:        start_mark,
-			end_mark:          end_mark,
-			version_directive: version_directive,
-			tag_directives:    tag_directives,
-			implicit:          false,
-		}
-		skip_token(parser)
-	} else {
-		/* Parse the stream end. */
-		parser.state = yaml_PARSE_END_STATE
-
-		*event = yaml_event_t{
-			event_type: yaml_STREAM_END_EVENT,
-			start_mark: token.start_mark,
-			end_mark:   token.end_mark,
-		}
-		skip_token(parser)
-	}
-	return true
-}
-
-/*
- * Parse the productions:
- * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* - * *********** - */ - -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN || - token.token_type == yaml_DOCUMENT_START_TOKEN || - token.token_type == yaml_DOCUMENT_END_TOKEN || - token.token_type == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } else { - return yaml_parser_parse_node(parser, event, true, false) - } -} - -/* - * Parse the productions: - * implicit_document ::= block_node DOCUMENT-END* - * ************* - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * ************* - */ - -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - implicit := true - - token := peek_token(parser) - if token == nil { - return false - } - - start_mark, end_mark := token.start_mark, token.start_mark - - if token.token_type == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - - return true -} - -/* - * Parse the productions: - * block_node_or_indentless_sequence ::= - * ALIAS - * ***** - * | properties (block_content | indentless_block_sequence)? - * ********** * - * | block_content | indentless_block_sequence - * * - * block_node ::= ALIAS - * ***** - * | properties block_content? - * ********** * - * | block_content - * * - * flow_node ::= ALIAS - * ***** - * | properties flow_content? - * ********** * - * | flow_content - * * - * properties ::= TAG ANCHOR? | ANCHOR TAG? 
- * ************************* - * block_content ::= block_collection | flow_collection | SCALAR - * ****** - * flow_content ::= flow_collection | SCALAR - * ****** - */ - -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, - block bool, indentless_sequence bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } else { - start_mark, end_mark := token.start_mark, token.start_mark - - var tag_handle []byte - var tag_suffix, anchor []byte - var tag_mark YAML_mark_t - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - start_mark, tag_mark = token.start_mark, token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - - } - } - - var tag []byte - if tag_handle != nil { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_handle = nil - tag_suffix = nil - } else { - for i := range parser.tag_directives { - tag_directive := &parser.tag_directives[i] - if bytes.Equal(tag_directive.handle, tag_handle) { - tag = append([]byte(nil), tag_directive.prefix...) - tag = append(tag, tag_suffix...) 
- tag_handle = nil - tag_suffix = nil - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.token_type == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else { - if token.token_type == yaml_SCALAR_TOKEN { - plain_implicit := false - quoted_implicit := false - end_mark = token.end_mark - if (token.style == yaml_PLAIN_SCALAR_STYLE && len(tag) == 0) || - (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - - skip_token(parser) - return true - } else if token.token_type == yaml_FLOW_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - - return true - } else if token.token_type == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } else if len(anchor) > 0 || len(tag) > 0 { - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } else { - msg := "while parsing a block node" - if !block { - msg = "while parsing a flow node" - } - yaml_parser_set_parser_error_context(parser, msg, start_mark, - "did not find 
expected node content", token.start_mark) - return false - } - } - } - - return false -} - -/* - * Parse the productions: - * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - * ******************** *********** * ********* - */ - -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", mark, - "did not find expected '-' indicator", token.start_mark) - } -} - -/* - * Parse the productions: - * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - * *********** * - */ - -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - return true - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * ******************* - * ((KEY block_node_or_indentless_sequence?)? 
- * *** * - * (VALUE block_node_or_indentless_sequence?)?)* - * - * BLOCK-END - * ********* - */ - -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", mark, - "did not find expected key", token.start_mark) - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * - * ((KEY block_node_or_indentless_sequence?)? - * - * (VALUE block_node_or_indentless_sequence?)?)* - * ***** * - * BLOCK-END - * - */ - -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } -} - -/* - * Parse the productions: - * flow_sequence ::= FLOW-SEQUENCE-START - * ******************* - * (flow_sequence_entry FLOW-ENTRY)* - * * ********** - * flow_sequence_entry? - * * - * FLOW-SEQUENCE-END - * ***************** - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * - */ - -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - skip_token(parser) - return true - } else if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * *** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * ***** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - - return true -} - -/* - * Parse the productions: - * flow_mapping ::= FLOW-MAPPING-START - * ****************** - * (flow_mapping_entry FLOW-ENTRY)* - * * ********** - * flow_mapping_entry? - * ****************** - * FLOW-MAPPING-END - * **************** - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * * *** * - */ - -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - } else if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * ***** * - */ - -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, - event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Generate an empty scalar event. - */ - -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, - mark YAML_mark_t) bool { - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - - return true -} - -/* - * Parse directives. - */ - -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - for token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || - token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - - if !yaml_parser_append_tag_directive(parser, value, false, - token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - - return true -} - -/* - * Append a tag directive to the directives stack.
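 - * (Per the loop above, the entries of default_tag_directives are appended with allow_duplicates set to true, so a handle the document has already declared, for example a redefined "!!", is left untouched.)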
- */ - -func yaml_parser_append_tag_directive(parser *yaml_parser_t, - value yaml_tag_directive_t, allow_duplicates bool, mark YAML_mark_t) bool { - for i := range parser.tag_directives { - tag := &parser.tag_directives[i] - if bytes.Equal(value.handle, tag.handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - parser.tag_directives = append(parser.tag_directives, value) - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go deleted file mode 100644 index 5631da2d..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go +++ /dev/null @@ -1,465 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Set the reader error and return 0. - */ - -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, - offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - - return false -} - -/* - * Byte order marks. - */ -const ( - BOM_UTF8 = "\xef\xbb\xbf" - BOM_UTF16LE = "\xff\xfe" - BOM_UTF16BE = "\xfe\xff" -) - -/* - * Determine the input stream encoding by checking the BOM symbol. If no BOM is - * found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. - */ - -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - /* Ensure that we had enough bytes in the raw buffer. */ - for !parser.eof && - len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - /* Determine the encoding. */ - raw := parser.raw_buffer - pos := parser.raw_buffer_pos - remaining := len(raw) - pos - if remaining >= 2 && - raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 2 && - raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 3 && - raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - - return true -} - -/* - * Update the raw buffer. - */ - -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - /* Return if the raw buffer is full. */ - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - /* Return on EOF. */ - - if parser.eof { - return true - } - - /* Move the remaining bytes in the raw buffer to the beginning. 
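 - * (Illustrative: with len(raw_buffer) == 8 and raw_buffer_pos == 5, the three unread bytes are copied to the front and the buffer is resliced to length 3, leaving raw_buffer[3:cap] for the read handler below to fill.)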
*/ - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - /* Call the read handler to fill the buffer. */ - size_read, err := parser.read_handler(parser, - parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), - parser.offset, -1) - } - - return true -} - -/* - * Ensure that the buffer contains at least `length` characters. - * Return 1 on success, 0 on failure. - * - * The length is supposed to be significantly less than the buffer size. - */ - -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - /* Read handler must be set. */ - if parser.read_handler == nil { - panic("read handler must be set") - } - - /* If the EOF flag is set and the raw buffer is empty, do nothing. */ - - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - /* Return if the buffer contains enough characters. */ - - if parser.unread >= length { - return true - } - - /* Determine the input encoding if it is not known yet. */ - - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - /* Move the unread characters to the beginning of the buffer. */ - buffer_end := len(parser.buffer) - if 0 < parser.buffer_pos && - parser.buffer_pos < buffer_end { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_end -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_end { - buffer_end = 0 - parser.buffer_pos = 0 - } - - parser.buffer = parser.buffer[:cap(parser.buffer)] - - /* Fill the buffer until it has enough characters. */ - first := true - for parser.unread < length { - /* Fill the raw buffer if necessary. */ - - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_end] - return false - } - } - first = false - - /* Decode the raw buffer. */ - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var w int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - incomplete := false - - /* Decode the next character. */ - - switch parser.encoding { - case yaml_UTF8_ENCODING: - - /* - * Decode a UTF-8 character. Check RFC 3629 - * (http://www.ietf.org/rfc/rfc3629.txt) for more details. - * - * The following table (taken from the RFC) is used for - * decoding. - * - * Char. number range | UTF-8 octet sequence - * (hexadecimal) | (binary) - * --------------------+------------------------------------ - * 0000 0000-0000 007F | 0xxxxxxx - * 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - * 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - * 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - * - * Additionally, the characters in the range 0xD800-0xDFFF - * are prohibited as they are reserved for use with UTF-16 - * surrogate pairs. - */ - - /* Determine the length of the UTF-8 sequence. */ - - octet := parser.raw_buffer[parser.raw_buffer_pos] - w = width(octet) - - /* Check if the leading octet is valid.
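 - * (Illustrative: width(0xE2) is 3 because 0xE2 matches the 1110xxxx row of the table above, while an octet such as 0xFF matches no row and makes width return 0.)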
*/ - - if w == 0 { - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - /* Check if the raw buffer contains an incomplete character. */ - - if w > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - incomplete = true - break - } - - /* Decode the leading octet. */ - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - /* Check and decode the trailing octets. */ - - for k := 1; k < w; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - /* Check if the octet is valid. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - /* Decode the octet. */ - - value = (value << 6) + rune(octet&0x3F) - } - - /* Check the length of the sequence against the value. */ - switch { - case w == 1: - case w == 2 && value >= 0x80: - case w == 3 && value >= 0x800: - case w == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - /* Check the range of the value. */ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - case yaml_UTF16LE_ENCODING, - yaml_UTF16BE_ENCODING: - - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - /* - * The UTF-16 encoding is not as simple as one might - * naively think. Check RFC 2781 - * (http://www.ietf.org/rfc/rfc2781.txt). - * - * Normally, two subsequent bytes describe a Unicode - * character. However a special technique (called a - * surrogate pair) is used for specifying character - * values larger than 0xFFFF. - * - * A surrogate pair consists of two pseudo-characters: - * high surrogate area (0xD800-0xDBFF) - * low surrogate area (0xDC00-0xDFFF) - * - * The following formulas are used for decoding - * and encoding characters using surrogate pairs: - * - * U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - * U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - * W1 = 110110yyyyyyyyyy - * W2 = 110111xxxxxxxxxx - * - * where U is the character value, W1 is the high surrogate - * area, W2 is the low surrogate area. - */ - - /* Check for incomplete UTF-16 character. */ - - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the character. */ - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - /* Check for unexpected low surrogate area. */ - - if (value & 0xFC00) == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - /* Check for a high surrogate area. */ - - if (value & 0xFC00) == 0xD800 { - - w = 4 - - /* Check for incomplete surrogate pair. */ - - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the next character. 
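 - * (Illustrative: U+1F600 is transmitted as the pair W1 = 0xD83D, W2 = 0xDE00, and 0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF) recovers 0x1F600, matching the formulas above.)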
*/ - - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - /* Check for a low surrogate area. */ - - if (value2 & 0xFC00) != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - /* Generate the value of the surrogate pair. */ - - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - w = 2 - } - - break - - default: - panic("Impossible") /* Impossible. */ - } - - /* Check if the raw buffer contains enough bytes to form a character. */ - - if incomplete { - break - } - - /* - * Check if the character is in the allowed range: - * #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - * | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - * | [#x10000-#x10FFFF] (32 bit) - */ - - if !(value == 0x09 || value == 0x0A || value == 0x0D || - (value >= 0x20 && value <= 0x7E) || - (value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) || - (value >= 0xE000 && value <= 0xFFFD) || - (value >= 0x10000 && value <= 0x10FFFF)) { - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - /* Move the raw pointers. */ - - parser.raw_buffer_pos += w - parser.offset += w - - /* Finally put the character into the buffer. */ - - /* 0000 0000-0000 007F . 0xxxxxxx */ - if value <= 0x7F { - parser.buffer[buffer_end] = byte(value) - } else if value <= 0x7FF { - /* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - /* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F)) - } else { - /* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F)) - } - - buffer_end += w - parser.unread++ - } - - /* On EOF, put NUL into the buffer and return. */ - - if parser.eof { - parser.buffer[buffer_end] = 0 - buffer_end++ - parser.buffer = parser.buffer[:buffer_end] - parser.unread++ - return true - } - - } - - parser.buffer = parser.buffer[:buffer_end] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go deleted file mode 100644 index fb9e8be8..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go +++ /dev/null @@ -1,449 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -var byteSliceType = reflect.TypeOf([]byte(nil)) - -var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)} -var bool_values map[string]bool -var null_values map[string]bool - -var signs = []byte{'-', '+'} -var nulls = []byte{'~', 'n', 'N'} -var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'} - -var timestamp_regexp *regexp.Regexp -var ymd_regexp *regexp.Regexp - -func init() { - bool_values = make(map[string]bool) - bool_values["y"] = true - bool_values["yes"] = true - bool_values["n"] = false - bool_values["no"] = false - bool_values["true"] = true - bool_values["false"] = false - bool_values["on"] = true - bool_values["off"] = false - - null_values = make(map[string]bool) - null_values["~"] = true - null_values["null"] = true - null_values["Null"] = true - null_values["NULL"] = true - - timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$") - ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$") -} - -func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) { - val := string(event.value) - - if null_values[val] { - v.Set(reflect.Zero(v.Type())) - return yaml_NULL_TAG, nil - } - - switch v.Kind() { - case reflect.String: - if useNumber && v.Type() == numberType { - tag, i := resolveInterface(event, useNumber) - if n, ok := i.(Number); ok { - v.Set(reflect.ValueOf(n)) - return tag, nil - } - return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark) - } - - return resolve_string(val, v, event) - case reflect.Bool: - return resolve_bool(val, v, event) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return resolve_int(val, v, useNumber, event) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return resolve_uint(val, v, useNumber, event) - case reflect.Float32, reflect.Float64: - return resolve_float(val, v, useNumber, event) - case reflect.Interface: - _, i := resolveInterface(event, useNumber) - if i != nil { - v.Set(reflect.ValueOf(i)) - } else { - v.Set(reflect.Zero(v.Type())) - } - - case reflect.Struct: - return resolve_time(val, v, event) - case reflect.Slice: - if v.Type() != byteSliceType { - return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark) - } - b, err := decode_binary(event.value, event) - if err != nil { - return "", err - } - - v.Set(reflect.ValueOf(b)) - default: - return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark) - } - - return yaml_STR_TAG, nil -} - -func hasBinaryTag(event yaml_event_t) bool { - for _, tag := range binary_tags { - if bytes.Equal(event.tag, tag) { - return true - } - } - return false -} - -func decode_binary(value []byte, event yaml_event_t) ([]byte, error) { - b := make([]byte, base64.StdEncoding.DecodedLen(len(value))) - n, err := base64.StdEncoding.Decode(b, value) - if err != nil { - return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark) - } - return b[:n], nil -} - -func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) { - if len(event.tag) > 0 { - if hasBinaryTag(event) { - b, err := decode_binary(event.value, 
event) - if err != nil { - return "", err - } - val = string(b) - } - } - v.SetString(val) - return yaml_STR_TAG, nil -} - -func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) { - b, found := bool_values[strings.ToLower(val)] - if !found { - return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark) - } - - v.SetBool(b) - return yaml_BOOL_TAG, nil -} - -func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - sign := int64(1) - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - var val64 int64 - if value <= math.MaxInt64 { - val64 = int64(value) - if sign == -1 { - val64 = -val64 - } - } else if sign == -1 && value == uint64(math.MaxInt64)+1 { - val64 = math.MinInt64 - } else { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatInt(val64, 10)) - } else { - if v.OverflowInt(val64) { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - v.SetInt(val64) - } - - return yaml_INT_TAG, nil -} - -func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - if val[0] == '-' { - return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark) - } - - if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatUint(value, 10)) - } else { - if v.OverflowUint(value) { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - v.SetUint(value) - } - - return yaml_INT_TAG, nil -} - -func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - val = strings.Replace(val, "_", "", -1) - var value float64 - - isNumberValue := v.Type() == numberType - typeBits := 64 - if !isNumberValue { - typeBits = v.Type().Bits() - } - - sign := 1 - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - valLower := strings.ToLower(val) - if valLower == ".inf" { - value = math.Inf(sign) - } else if valLower == ".nan" { - value = math.NaN() - } else { - var err error - value, err = strconv.ParseFloat(val, typeBits) - value *= float64(sign) - - if err != nil { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - } - - if isNumberValue { - v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits)) - } else { - if 
v.OverflowFloat(value) { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - - v.SetFloat(value) - } - - return yaml_FLOAT_TAG, nil -} - -func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) { - var parsedTime time.Time - matches := ymd_regexp.FindStringSubmatch(val) - if len(matches) > 0 { - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) - } else { - matches = timestamp_regexp.FindStringSubmatch(val) - if len(matches) == 0 { - return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark) - } - - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - hour, _ := strconv.Atoi(matches[4]) - min, _ := strconv.Atoi(matches[5]) - sec, _ := strconv.Atoi(matches[6]) - - nsec := 0 - if matches[7] != "" { - millis, _ := strconv.Atoi(matches[7]) - nsec = int(time.Duration(millis) * time.Millisecond) - } - - loc := time.UTC - if matches[8] != "" { - sign := matches[8][0] - hr, _ := strconv.Atoi(matches[8][1:]) - min := 0 - if matches[9] != "" { - min, _ = strconv.Atoi(matches[9]) - } - - zoneOffset := (hr*60 + min) * 60 - if sign == '-' { - zoneOffset = -zoneOffset - } - - loc = time.FixedZone("", zoneOffset) - } - parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) - } - - v.Set(reflect.ValueOf(parsedTime)) - return "", nil -} - -func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) { - val := string(event.value) - if len(event.tag) == 0 && !event.implicit { - return "", val - } - - if len(val) == 0 { - return yaml_NULL_TAG, nil - } - - var result interface{} - - sign := false - c := val[0] - switch { - case bytes.IndexByte(signs, c) != -1: - sign = true - fallthrough - case c >= '0' && c <= '9': - i := int64(0) - result = &i - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_int(val, v, useNumber, event); err == nil { - return yaml_INT_TAG, v.Interface() - } - - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v = reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - - if !sign { - t := time.Time{} - if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil { - return "", t - } - } - case bytes.IndexByte(nulls, c) != -1: - if null_values[val] { - return yaml_NULL_TAG, nil - } - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - case c == '.': - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - case bytes.IndexByte(bools, c) != -1: - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - } - - if hasBinaryTag(event) { - bytes, err := decode_binary(event.value, event) - if err == nil { - return yaml_BINARY_TAG, bytes - } - } - - return yaml_STR_TAG, val -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go deleted file mode 100644 index 25c29816..00000000 
--- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "os" -) - -func Run_parser(cmd string, args []string) { - for i := 0; i < len(args); i++ { - fmt.Printf("[%d] Scanning '%s'", i, args[i]) - file, err := os.Open(args[i]) - if err != nil { - panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error())) - } - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_reader(&parser, file) - - failed := false - token := yaml_token_t{} - count := 0 - for { - if !yaml_parser_scan(&parser, &token) { - failed = true - break - } - - if token.token_type == yaml_STREAM_END_TOKEN { - break - } - count++ - } - - file.Close() - - msg := "SUCCESS" - if failed { - msg = "FAILED" - if parser.error != yaml_NO_ERROR { - m := parser.problem_mark - fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", - parser.context, parser.problem, m.line, m.column) - } - } - fmt.Printf("%s (%d tokens)\n", msg, count) - } -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go deleted file mode 100644 index 5c080a06..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go +++ /dev/null @@ -1,3318 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -/* - * Introduction - * ************ - * - * The following notes assume that you are familiar with the YAML specification - * (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in - * some cases we are less restrictive than it requires. - * - * The process of transforming a YAML stream into a sequence of events is - * divided into two steps: Scanning and Parsing. - * - * The Scanner transforms the input stream into a sequence of tokens, while the - * Parser transforms the sequence of tokens produced by the Scanner into a - * sequence of parsing events. - * - * The Scanner is rather clever and complicated. The Parser, on the contrary, - * is a straightforward implementation of a recursive-descent parser (or - * LL(1) parser, as it is usually called). - * - * Actually there are two issues of Scanning that might be called "clever"; the - * rest is quite straightforward. The issues are "block collection start" and - * "simple keys". Both issues are explained below in detail. - * - * Here the Scanning step is explained and implemented.
We start with the list - * of all the tokens produced by the Scanner together with short descriptions. - * - * Now, tokens: - * - * STREAM-START(encoding) # The stream start. - * STREAM-END # The stream end. - * VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. - * TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. - * DOCUMENT-START # '---' - * DOCUMENT-END # '...' - * BLOCK-SEQUENCE-START # Indentation increase denoting a block - * BLOCK-MAPPING-START # sequence or a block mapping. - * BLOCK-END # Indentation decrease. - * FLOW-SEQUENCE-START # '[' - * FLOW-SEQUENCE-END # ']' - * FLOW-MAPPING-START # '{' - * FLOW-MAPPING-END # '}' - * BLOCK-ENTRY # '-' - * FLOW-ENTRY # ',' - * KEY # '?' or nothing (simple keys). - * VALUE # ':' - * ALIAS(anchor) # '*anchor' - * ANCHOR(anchor) # '&anchor' - * TAG(handle,suffix) # '!handle!suffix' - * SCALAR(value,style) # A scalar. - * - * The following two tokens are "virtual" tokens denoting the beginning and the - * end of the stream: - * - * STREAM-START(encoding) - * STREAM-END - * - * We pass the information about the input stream encoding with the - * STREAM-START token. - * - * The next two tokens are responsible for tags: - * - * VERSION-DIRECTIVE(major,minor) - * TAG-DIRECTIVE(handle,prefix) - * - * Example: - * - * %YAML 1.1 - * %TAG ! !foo - * %TAG !yaml! tag:yaml.org,2002: - * --- - * - * The corresponding sequence of tokens: - * - * STREAM-START(utf-8) - * VERSION-DIRECTIVE(1,1) - * TAG-DIRECTIVE("!","!foo") - * TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") - * DOCUMENT-START - * STREAM-END - * - * Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole - * line. - * - * The document start and end indicators are represented by: - * - * DOCUMENT-START - * DOCUMENT-END - * - * Note that if a YAML stream contains an implicit document (without '---' - * and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be - * produced. - * - * In the following examples, we present whole documents together with the - * produced tokens. - * - * 1. An implicit document: - * - * 'a scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * STREAM-END - * - * 2. An explicit document: - * - * --- - * 'a scalar' - * ... - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * SCALAR("a scalar",single-quoted) - * DOCUMENT-END - * STREAM-END - * - * 3. Several documents in a stream: - * - * 'a scalar' - * --- - * 'another scalar' - * --- - * 'yet another scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * DOCUMENT-START - * SCALAR("another scalar",single-quoted) - * DOCUMENT-START - * SCALAR("yet another scalar",single-quoted) - * STREAM-END - * - * We have already introduced the SCALAR token above. The following tokens are - * used to describe aliases, anchors, tags, and scalars: - * - * ALIAS(anchor) - * ANCHOR(anchor) - * TAG(handle,suffix) - * SCALAR(value,style) - * - * The following series of examples illustrates the usage of these tokens: - * - * 1. A recursive sequence: - * - * &A [ *A ] - * - * Tokens: - * - * STREAM-START(utf-8) - * ANCHOR("A") - * FLOW-SEQUENCE-START - * ALIAS("A") - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A tagged scalar: - * - * !!float "3.14" # A good approximation. - * - * Tokens: - * - * STREAM-START(utf-8) - * TAG("!!","float") - * SCALAR("3.14",double-quoted) - * STREAM-END - * - * 3. Various scalar styles: - * - * --- # Implicit empty plain scalars do not produce tokens.
- * --- a plain scalar - * --- 'a single-quoted scalar' - * --- "a double-quoted scalar" - * --- |- - * a literal scalar - * --- >- - * a folded - * scalar - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * DOCUMENT-START - * SCALAR("a plain scalar",plain) - * DOCUMENT-START - * SCALAR("a single-quoted scalar",single-quoted) - * DOCUMENT-START - * SCALAR("a double-quoted scalar",double-quoted) - * DOCUMENT-START - * SCALAR("a literal scalar",literal) - * DOCUMENT-START - * SCALAR("a folded scalar",folded) - * STREAM-END - * - * Now it's time to review collection-related tokens. We will start with - * flow collections: - * - * FLOW-SEQUENCE-START - * FLOW-SEQUENCE-END - * FLOW-MAPPING-START - * FLOW-MAPPING-END - * FLOW-ENTRY - * KEY - * VALUE - * - * The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and - * FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' - * respectively. FLOW-ENTRY represents the ',' indicator. Finally, the - * indicators '?' and ':', which are used for denoting mapping keys and values, - * are represented by the KEY and VALUE tokens. - * - * The following examples show flow collections: - * - * 1. A flow sequence: - * - * [item 1, item 2, item 3] - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-SEQUENCE-START - * SCALAR("item 1",plain) - * FLOW-ENTRY - * SCALAR("item 2",plain) - * FLOW-ENTRY - * SCALAR("item 3",plain) - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A flow mapping: - * - * { - * a simple key: a value, # Note that the KEY token is produced. - * ? a complex key: another value, - * } - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * FLOW-ENTRY - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * FLOW-ENTRY - * FLOW-MAPPING-END - * STREAM-END - * - * A simple key is a key which is not denoted by the '?' indicator. Note that - * the Scanner still produces the KEY token whenever it encounters a simple key. - * - * For scanning block collections, the following tokens are used (note that we - * repeat KEY and VALUE here): - * - * BLOCK-SEQUENCE-START - * BLOCK-MAPPING-START - * BLOCK-END - * BLOCK-ENTRY - * KEY - * VALUE - * - * The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation - * increase that precedes a block collection (cf. the INDENT token in Python). - * The token BLOCK-END denotes the indentation decrease that ends a block collection - * (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities - * that make detection of these tokens more complex. - * - * The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators - * '-', '?', and ':' respectively. - * - * The following examples show how the tokens BLOCK-SEQUENCE-START, - * BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: - * - * 1.
Block sequences: - * - * - item 1 - * - item 2 - * - - * - item 3.1 - * - item 3.2 - * - - * key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 3.1",plain) - * BLOCK-ENTRY - * SCALAR("item 3.2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Block mappings: - * - * a simple key: a value # The KEY token is produced here. - * ? a complex key - * : another value - * a mapping: - * key 1: value 1 - * key 2: value 2 - * a sequence: - * - item 1 - * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * KEY - * SCALAR("a mapping",plain) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML does not always require a new block collection to start on a new - * line. If the current line contains only '-', '?', and ':' indicators, a new - * block collection may start at the current line. The following examples - * illustrate this case: - * - * 1. Collections in a sequence: - * - * - - item 1 - * - item 2 - * - key 1: value 1 - * key 2: value 2 - * - ? complex key - * : complex value - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("complex key") - * VALUE - * SCALAR("complex value") - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Collections in a mapping: - * - * ? a sequence - * : - item 1 - * - item 2 - * ? a mapping - * : key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * KEY - * SCALAR("a mapping",plain) - * VALUE - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML also permits non-indented sequences if they are included in a block - * mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: - * - * key: - * - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
- * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key",plain) - * VALUE - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - */ - -/* - * Ensure that the buffer contains the required number of characters. - * Return 1 on success, 0 on failure (reader error or memory error). - */ -func cache(parser *yaml_parser_t, length int) bool { - if parser.unread >= length { - return true - } - - return yaml_parser_update_buffer(parser, length) -} - -/* - * Advance the buffer pointer. - */ -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf_at(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break_at(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -/* - * Copy a character to a string buffer and advance pointers. - */ - -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -/* - * Copy a line break character to a string buffer and advance pointers. - */ -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - if buf[pos] == '\r' && buf[pos+1] == '\n' { - /* CR LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - } else if buf[pos] == '\r' || buf[pos] == '\n' { - /* CR|LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 1 - } else if buf[pos] == '\xC2' && buf[pos+1] == '\x85' { - /* NEL . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - } else if buf[pos] == '\xE2' && buf[pos+1] == '\x80' && - (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9') { - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - } else { - return s - } - - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -/* - * Get the next token. - */ - -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Erase the token object. */ - *token = yaml_token_t{} - - /* No tokens after STREAM-END or error. */ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - /* Ensure that the tokens queue contains enough tokens. */ - - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - /* Fetch the next token from the queue. */ - - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.token_available = false - parser.tokens_parsed++ - - if token.token_type == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - - return true -} - -/* - * Set the scanner error and return 0. 
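 - * (As elsewhere in this port of libyaml, "return 0" in these comments corresponds to returning false.)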
- */ - -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, - context_mark YAML_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark YAML_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -/* - * Ensure that the tokens queue contains at least one token which can be - * returned to the Parser. - */ - -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - /* While we need more tokens to fetch, do it. */ - - for { - /* - * Check if we really need to fetch more tokens. - */ - - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - /* Queue is empty. */ - - need_more_tokens = true - } else { - - /* Check if any potential simple key may occupy the head position. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - if simple_key.possible && - simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - /* We are finished. */ - - if !need_more_tokens { - break - } - - /* Fetch the next token. */ - - if !yaml_parser_fetch_next_token(parser) { - return false - } - - } - - parser.token_available = true - - return true -} - -/* - * The dispatcher for token fetchers. - */ - -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - /* Ensure that the buffer is initialized. */ - - if !cache(parser, 1) { - return false - } - - /* Check if we just started scanning. Fetch STREAM-START then. */ - - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - /* Eat whitespaces and comments until we reach the next token. */ - - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - /* Remove obsolete potential simple keys. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - /* Check the indentation level against the current column. */ - - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - /* - * Ensure that the buffer contains at least 4 characters. 4 is the length - * of the longest indicators ('--- ' and '... '). - */ - - if !cache(parser, 4) { - return false - } - - /* Is it the end of the stream? */ - buf := parser.buffer - pos := parser.buffer_pos - - if is_z(buf[pos]) { - return yaml_parser_fetch_stream_end(parser) - } - - /* Is it a directive? */ - - if parser.mark.column == 0 && buf[pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - /* Is it the document start indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_START_TOKEN) - } - - /* Is it the document end indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_END_TOKEN) - } - - /* Is it the flow sequence start indicator?
*/ - - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_SEQUENCE_START_TOKEN) - } - - /* Is it the flow mapping start indicator? */ - - if buf[pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_MAPPING_START_TOKEN) - } - - /* Is it the flow sequence end indicator? */ - - if buf[pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - /* Is it the flow mapping end indicator? */ - - if buf[pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - /* Is it the flow entry indicator? */ - - if buf[pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - /* Is it the block entry indicator? */ - if buf[pos] == '-' && is_blankz_at(buf, pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - /* Is it the key indicator? */ - if buf[pos] == '?' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_key(parser) - } - - /* Is it the value indicator? */ - if buf[pos] == ':' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_value(parser) - } - - /* Is it an alias? */ - if buf[pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - /* Is it an anchor? */ - - if buf[pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - /* Is it a tag? */ - - if buf[pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - /* Is it a literal scalar? */ - if buf[pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - /* Is it a folded scalar? */ - if buf[pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - /* Is it a single-quoted scalar? */ - - if buf[pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - /* Is it a double-quoted scalar? */ - if buf[pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - /* - * Is it a plain scalar? - * - * A plain scalar may start with any non-blank characters except - * - * '-', '?', ':', ',', '[', ']', '{', '}', - * '#', '&', '*', '!', '|', '>', '\'', '\"', - * '%', '@', '`'. - * - * In the block context (and, for the '-' indicator, in the flow context - * too), it may also start with the characters - * - * '-', '?', ':' - * - * if it is followed by a non-space character. - * - * The last rule is more restrictive than the specification requires. - */ - - b := buf[pos] - if !(is_blankz_at(buf, pos) || b == '-' || - b == '?' || b == ':' || - b == ',' || b == '[' || - b == ']' || b == '{' || - b == '}' || b == '#' || - b == '&' || b == '*' || - b == '!' || b == '|' || - b == '>' || b == '\'' || - b == '"' || b == '%' || - b == '@' || b == '`') || - (b == '-' && !is_blank(buf[pos+1])) || - (parser.flow_level == 0 && - (buf[pos] == '?' || buf[pos] == ':') && - !is_blank(buf[pos+1])) { - return yaml_parser_fetch_plain_scalar(parser) - } - - /* - * If we don't determine the token type so far, it is an error. - */ - - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -/* - * Check the list of potential simple keys and remove the positions that - * cannot contain simple keys anymore. - */ - -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - /* Check for a potential simple key for each flow level. 
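The plain-scalar start rule just described can be restated as a standalone predicate. A sketch, assuming the is_blank and is_blankz_at helpers used throughout this file (can_start_plain_scalar is hypothetical):

func can_start_plain_scalar(buf []byte, pos int, flow_level int) bool {
	b := buf[pos]
	switch b {
	case '-':
		// '-' may start a plain scalar when not followed by a blank.
		return !is_blank(buf[pos+1])
	case '?', ':':
		// '?' and ':' may start one only in the block context.
		return flow_level == 0 && !is_blank(buf[pos+1])
	case ',', '[', ']', '{', '}', '#', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
		return false // indicators that never start a plain scalar
	}
	return !is_blankz_at(buf, pos)
}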
*/ - - for i := range parser.simple_keys { - /* - * The specification requires that a simple key - * - * - is limited to a single line, - * - is shorter than 1024 characters. - */ - - simple_key := &parser.simple_keys[i] - if simple_key.possible && - (simple_key.mark.line < parser.mark.line || - simple_key.mark.index+1024 < parser.mark.index) { - - /* Check if the potential simple key to be removed is required. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - - simple_key.possible = false - } - } - - return true -} - -/* - * Check if a simple key may start at the current position and add it if - * needed. - */ - -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - /* - * A simple key is required at the current position if the scanner is in - * the block context and the current column coincides with the indentation - * level. - */ - - required := (parser.flow_level == 0 && - parser.indent == parser.mark.column) - - /* - * A simple key is required only when it is the first token in the current - * line. Therefore it is always allowed. But we add a check anyway. - */ - if required && !parser.simple_key_allowed { - panic("impossible") /* Impossible. */ - } - - /* - * If the current position may start a simple key, save it. - */ - - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - - return true -} - -/* - * Remove a potential simple key at the current flow level. - */ - -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - if simple_key.possible { - /* If the key is required, it is an error. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - } - - /* Remove the key from the stack. */ - - simple_key.possible = false - - return true -} - -/* - * Increase the flow level and resize the simple key list if needed. - */ - -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - /* Reset the simple key on the next level. */ - - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* Increase the flow level. */ - - parser.flow_level++ - - return true -} - -/* - * Decrease the flow level. - */ - -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - - return true -} - -/* - * Push the current indentation level to the stack and set the new level if - * the current column is greater than the indentation level. In this case, - * append or insert the specified token into the token queue. - */ - -func yaml_parser_roll_indent(parser *yaml_parser_t, column int, - number int, token_type yaml_token_type_t, mark YAML_mark_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - if parser.indent == -1 || parser.indent < column { - /* - * Push the current indentation level to the stack and set the new - * indentation level.
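The token_number arithmetic used when saving a simple key deserves a note: it is the absolute index the next queued token will receive. The same computation as a one-line sketch (next_token_number is a hypothetical helper, not in the package):

// tokens_parsed counts tokens already handed back to the parser;
// len(tokens)-tokens_head counts tokens still sitting in the queue.
func next_token_number(parser *yaml_parser_t) int {
	return parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head)
}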
- */ - - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - /* Create a token and insert it into the queue. */ - token := yaml_token_t{ - token_type: token_type, - start_mark: mark, - end_mark: mark, - } - - // number == -1 -> enqueue otherwise insert - if number > -1 { - number -= parser.tokens_parsed - } - insert_token(parser, number, &token) - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current level - * becomes less or equal to the column. For each indentation level, append - * the BLOCK-END token. - */ - -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* - * column is unsigned and parser->indent is signed, so if - * parser->indent is less than zero the conditional in the while - * loop below is incorrect. Guard against that. - */ - - if parser.indent < 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > column { - /* Create a token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current - * level resets to -1. For each indentation level, append the - * BLOCK-END token. - */ - -func yaml_parser_reset_indent(parser *yaml_parser_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > -1 { - /* Create a token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - - return true -} - -/* - * Initialize the scanner and produce the STREAM-START token. - */ - -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - /* Set the initial indentation. */ - - parser.indent = -1 - - /* Initialize the simple key stack. */ - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* A simple key is allowed at the beginning of the stream. */ - - parser.simple_key_allowed = true - - /* We have started. */ - - parser.stream_start_produced = true - - /* Create the STREAM-START token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the STREAM-END token and shut down the scanner. - */ - -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - /* Force new line. */ - - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the STREAM-END token and append it to the queue. 
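Taken together, roll and unroll implement a plain indentation stack. A self-contained sketch of that discipline under hypothetical names (indent_stack is not part of the package):

type indent_stack struct {
	indent  int   // current block indentation column; -1 at stream level
	indents []int // enclosing indentation columns
}

// roll: a block collection starts at `column`. Returns true when the
// caller should emit BLOCK-SEQUENCE-START or BLOCK-MAPPING-START,
// mirroring yaml_parser_roll_indent above.
func (st *indent_stack) roll(column int) bool {
	if st.indent < column {
		st.indents = append(st.indents, st.indent)
		st.indent = column
		return true
	}
	return false
}

// unroll: a token was found at `column`; pop every deeper level and
// report how many BLOCK-END tokens to emit, as in yaml_parser_unroll_indent.
func (st *indent_stack) unroll(column int) (block_ends int) {
	for st.indent > column {
		st.indent = st.indents[len(st.indents)-1]
		st.indents = st.indents[:len(st.indents)-1]
		block_ends++
	}
	return
}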
*/ - token := yaml_token_t{ - token_type: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. - */ - -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. */ - var token yaml_token_t - if !yaml_parser_scan_directive(parser, &token) { - return false - } - - /* Append the token to the queue. */ - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the DOCUMENT-START or DOCUMENT-END token. - */ - -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - /* Create the DOCUMENT-START or DOCUMENT-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. - */ - -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* The indicators '[' and '{' may start a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* Increase the flow level. */ - - if !yaml_parser_increase_flow_level(parser) { - return false - } - - /* A simple key may follow the indicators '[' and '{'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. - */ - -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset any potential simple key on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Decrease the flow level. */ - - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - /* No simple keys after the indicators ']' and '}'. */ - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-ENTRY token. - */ - -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - - /* Reset any potential simple keys on the current flow level. 
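The column-0 tests that route into yaml_parser_fetch_document_indicator read naturally as one predicate. A sketch, assuming is_blankz_at from this file (at_document_indicator is hypothetical); the dispatcher's cache(parser, 4) call guarantees the four bytes inspected:

func at_document_indicator(buf []byte, pos int, column int, marker byte) bool {
	// marker is '-' for DOCUMENT-START ("---") or '.' for DOCUMENT-END ("...").
	return column == 0 &&
		buf[pos] == marker && buf[pos+1] == marker && buf[pos+2] == marker &&
		is_blankz_at(buf, pos+3)
}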
*/ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after ','. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the BLOCK-ENTRY token. - */ - -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - - /* Check if the scanner is in the block context. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new entry. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - - /* Add the BLOCK-SEQUENCE-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - /* - * It is an error for the '-' indicator to occur in the flow context, - * but we let the Parser detect and report about it because the Parser - * is able to point to the context. - */ - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '-'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the BLOCK-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the KEY token. - */ - -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - /* In the block context, additional checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new key (not nessesary simple). */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '?' in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the KEY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the VALUE token. - */ - -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - /* Have we found a simple key? */ - - if simple_key.possible { - - /* Create the KEY token and insert it into the queue. 
*/ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - - insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - /* In the block context, we may need to add the BLOCK-MAPPING-START token. */ - - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - /* Remove the simple key. */ - - simple_key.possible = false - - /* A simple key cannot follow another simple key. */ - - parser.simple_key_allowed = false - } else { - /* The ':' indicator follows a complex key. */ - - /* In the block context, extra checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a complex value. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Simple keys after ':' are allowed in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - } - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the VALUE token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the ALIAS or ANCHOR token. - */ - -func yaml_parser_fetch_anchor(parser *yaml_parser_t, token_type yaml_token_type_t) bool { - - /* An anchor or an alias could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow an anchor or an alias. */ - - parser.simple_key_allowed = false - - /* Create the ALIAS or ANCHOR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, token_type) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the TAG token. - */ - -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - /* A tag could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a tag. */ - - parser.simple_key_allowed = false - - /* Create the TAG token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. - */ - -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - /* Remove any potential simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* A simple key may follow a block scalar. */ - - parser.simple_key_allowed = true - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. - */ - -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - - /* A plain scalar could be a simple key. 
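The retroactive insertion performed by yaml_parser_fetch_value is easiest to see on a concrete input such as "a: 1": the scalar "a" is queued before anything marks it as a key, and the ':' then inserts KEY (and, via roll_indent, BLOCK-MAPPING-START) in front of it. A sketch of the resulting stream, using the token constants from this file (the function itself is illustrative):

func tokens_for_simple_mapping() []yaml_token_type_t {
	// Input: "a: 1\n"
	return []yaml_token_type_t{
		yaml_STREAM_START_TOKEN,
		yaml_BLOCK_MAPPING_START_TOKEN, // inserted retroactively by roll_indent
		yaml_KEY_TOKEN,                 // inserted retroactively by fetch_value
		yaml_SCALAR_TOKEN,              // "a", queued before ':' was seen
		yaml_VALUE_TOKEN,
		yaml_SCALAR_TOKEN, // "1"
		yaml_BLOCK_END_TOKEN,
		yaml_STREAM_END_TOKEN,
	}
}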
*/ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,plain) token. - */ - -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - /* A plain scalar could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Eat whitespaces and comments until the next token is found. - */ - -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - /* Until the next token is not found. */ - - for { - /* Allow the BOM mark to start a line. */ - - if !cache(parser, 1) { - return false - } - - if parser.mark.column == 0 && is_bom_at(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - /* - * Eat whitespaces. - * - * Tabs are allowed: - * - * - in the flow context; - * - in the block context, but not at the beginning of the line or - * after '-', '?', or ':' (complex value). - */ - - if !cache(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || - ((parser.flow_level > 0 || !parser.simple_key_allowed) && - parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Eat a comment until a line break. */ - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* If it is a line break, eat it. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - - /* In the block context, a new line may start a simple key. */ - - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - /* We have found a token. */ - - break - } - } - - return true -} - -/* - * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Eat '%'. */ - - start_mark := parser.mark - - skip(parser) - - /* Scan the directive name. */ - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - /* Is it a YAML directive? */ - var major, minor int - if bytes.Equal(name, []byte("YAML")) { - /* Scan the VERSION directive value. */ - - if !yaml_parser_scan_version_directive_value(parser, start_mark, - &major, &minor) { - return false - } - - end_mark := parser.mark - - /* Create a VERSION-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - } else if bytes.Equal(name, []byte("TAG")) { - /* Is it a TAG directive? */ - /* Scan the TAG directive value. 
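One subtlety in yaml_parser_scan_to_next_token is the tab rule: tabs may separate tokens in the flow context, and in the block context only at positions where no simple key could start. The same test as a hypothetical helper:

func tab_may_separate_tokens(parser *yaml_parser_t) bool {
	// In the block context, a tab where a simple key could start would
	// make its indentation ambiguous, so it is rejected there.
	return parser.flow_level > 0 || !parser.simple_key_allowed
}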
*/ - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, - &handle, &prefix) { - return false - } - - end_mark := parser.mark - - /* Create a TAG-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - } else { - /* Unknown directive. */ - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - /* Eat the rest of the line including any comments. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -/* - * Scan the directive name. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^ - */ - -func yaml_parser_scan_directive_name(parser *yaml_parser_t, - start_mark YAML_mark_t, name *[]byte) bool { - - /* Consume the directive name. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the name is empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - /* Check for a blank character after the name. */ - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - - *name = s - - return true -} - -/* - * Scan the value of VERSION-DIRECTIVE. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^ - */ - -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, major *int, minor *int) bool { - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Consume the major version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - /* Eat '.'. */ - - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - /* Consume the minor version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - - return true -} - -const MAX_NUMBER_LENGTH = 9 - -/* - * Scan the version number of VERSION-DIRECTIVE.
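Concretely, scanning the line "%YAML 1.1" yields one token shaped as below, marks elided (example_version_directive is an illustrative function, not part of the package):

func example_version_directive() yaml_token_t {
	// "%YAML 1.1" -> VERSION-DIRECTIVE(1,1)
	return yaml_token_t{
		token_type: yaml_VERSION_DIRECTIVE_TOKEN,
		major:      1,
		minor:      1,
	}
}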
- * - * Scope: - * %YAML 1.1 # a comment \n - * ^ - * %YAML 1.1 # a comment \n - * ^ - */ - -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, - start_mark YAML_mark_t, number *int) bool { - - /* Repeat while the next character is digit. */ - - if !cache(parser, 1) { - return false - } - - value := 0 - length := 0 - for is_digit(parser.buffer[parser.buffer_pos]) { - /* Check if the number is too long. */ - - length++ - if length > MAX_NUMBER_LENGTH { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - - value = value*10 + as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - } - - /* Check if the number was present. */ - - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - - *number = value - - return true -} - -/* - * Scan the value of a TAG-DIRECTIVE token. - * - * Scope: - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, handle, prefix *[]byte) bool { - - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a handle. */ - var handle_value []byte - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - /* Expect a whitespace. */ - - if !cache(parser, 1) { - return false - } - - if !is_blank(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - /* Eat whitespaces. */ - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a prefix. */ - var prefix_value []byte - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - /* Expect a whitespace or line break. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, - token_type yaml_token_type_t) bool { - - /* Eat the indicator character. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the value. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - b := parser.buffer[parser.buffer_pos] - if len(s) == 0 || !(is_blankz_at(parser.buffer, parser.buffer_pos) || b == '?' 
|| - b == ':' || b == ',' || - b == ']' || b == '}' || - b == '%' || b == '@' || - b == '`') { - context := "while scanning an anchor" - if token_type != yaml_ANCHOR_TOKEN { - context = "while scanning an alias" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - /* Create a token. */ - *token = yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - start_mark := parser.mark - - /* Check if the tag is in the canonical form. */ - - if !cache(parser, 2) { - return false - } - - var handle []byte - var suffix []byte - if parser.buffer[parser.buffer_pos+1] == '<' { - /* Set the handle to '' */ - - /* Eat '!<' */ - - skip(parser) - skip(parser) - - /* Consume the tag value. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - /* Check for '>' and eat it. */ - - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else if is_blank(parser.buffer[parser.buffer_pos+1]) { - // NON-SPECIFIED - skip(parser) - } else { - /* The tag has either the '!suffix' or the '!handle!suffix' form. */ - - /* First, try to scan a handle. */ - - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - /* Check if it is, indeed, handle. */ - - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - /* Scan the suffix now. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - /* It wasn't a handle after all. Scan the rest of the tag. */ - - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - /* Set the handle to '!'. */ - - handle = []byte{'!'} - - /* - * A special case: the '!' tag. Set the handle to '' and the - * suffix to '!'. - */ - - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - - } - } - - /* Check the character which ends the tag. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - - return true -} - -/* - * Scan a tag handle. - */ - -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, handle *[]byte) bool { - - /* Check the initial '!' character. */ - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - /* Copy the '!' character. */ - var s []byte - s = read(parser, s) - - /* Copy all subsequent alphabetical and numerical characters. */ - - if !cache(parser, 1) { - return false - } - - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the trailing character is '!' and copy it. 
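The three tag forms distinguished by yaml_parser_scan_tag split into handle/suffix pairs as sketched below; the values are inferred from the branches above (example_tag_splits is hypothetical):

func example_tag_splits() map[string][2]string {
	return map[string][2]string{
		"!<tag:yaml.org,2002:str>": {"", "tag:yaml.org,2002:str"}, // verbatim form
		"!!str":                    {"!!", "str"},                 // handle!suffix form
		"!local":                   {"!", "local"},                // bare !suffix form
	}
}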
*/ - - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - /* - * It's either the '!' tag or not really a tag handle. If it's a %TAG - * directive, it's an error. If it's a tag token, it must be a part of - * URI. - */ - - if directive && !(s[0] == '!' && len(s) == 1) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - - return true -} - -/* - * Scan a tag. - */ - -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, - head []byte, start_mark YAML_mark_t, uri *[]byte) bool { - - var s []byte - /* - * Copy the head if needed. - * - * Note that we don't copy the leading '!' character. - */ - if len(head) > 1 { - s = append(s, head[1:]...) - } - - /* Scan the tag. */ - if !cache(parser, 1) { - return false - } - - /* - * The set of characters that may appear in URI is as follows: - * - * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - * '%'. - */ - - b := parser.buffer[parser.buffer_pos] - for is_alpha(b) || b == ';' || - b == '/' || b == '?' || - b == ':' || b == '@' || - b == '&' || b == '=' || - b == '+' || b == '$' || - b == ',' || b == '.' || - b == '!' || b == '~' || - b == '*' || b == '\'' || - b == '(' || b == ')' || - b == '[' || b == ']' || - b == '%' { - /* Check if it is a URI-escape sequence. */ - - if b == '%' { - if !yaml_parser_scan_uri_escapes(parser, - directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - - if !cache(parser, 1) { - return false - } - b = parser.buffer[parser.buffer_pos] - } - - /* Check if the tag is non-empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - - *uri = s - - return true -} - -/* - * Decode an URI-escape sequence corresponding to a single UTF-8 character. - */ - -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, s *[]byte) bool { - - /* Decode the required number of characters. */ - w := 10 - for w > 0 { - - /* Check for a URI-escaped octet. */ - - if !cache(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer[parser.buffer_pos+1]) && - is_hex(parser.buffer[parser.buffer_pos+2])) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - /* Get the octet. */ - octet := byte((as_hex(parser.buffer[parser.buffer_pos+1]) << 4) + - as_hex(parser.buffer[parser.buffer_pos+2])) - - /* If it is the leading octet, determine the length of the UTF-8 sequence. */ - - if w == 10 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - /* Check if the trailing octet is correct. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - /* Copy the octet and move the pointers. */ - - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - - return true -} - -/* - * Scan a block scalar. - */ - -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, - literal bool) bool { - - /* Eat the indicator '|' or '>'. 
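A standalone sketch of the %-escape decoding performed by yaml_parser_scan_uri_escapes, minus the UTF-8 sequence validation it layers on top; it assumes the is_hex and as_hex helpers used above (decode_uri_escapes is hypothetical):

func decode_uri_escapes(in []byte) ([]byte, bool) {
	var out []byte
	for i := 0; i < len(in); {
		if in[i] != '%' {
			out = append(out, in[i])
			i++
			continue
		}
		if i+2 >= len(in) || !is_hex(in[i+1]) || !is_hex(in[i+2]) {
			return nil, false // "did not find URI escaped octet"
		}
		// Two hex digits form one octet of a UTF-8 sequence.
		out = append(out, byte(as_hex(in[i+1])<<4+as_hex(in[i+2])))
		i += 3
	}
	return out, true
}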
*/ - - start_mark := parser.mark - - skip(parser) - - /* Scan the additional block scalar indicators. */ - - if !cache(parser, 1) { - return false - } - - /* Check for a chomping indicator. */ - chomping := 0 - increment := 0 - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - /* Set the chomping method and eat the indicator. */ - - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - - /* Check for an indentation indicator. */ - - if !cache(parser, 1) { - return false - } - - if is_digit(parser.buffer[parser.buffer_pos]) { - /* Check that the indentation is greater than 0. */ - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - /* Get the indentation level and eat the indicator. */ - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - } - } else if is_digit(parser.buffer[parser.buffer_pos]) { - - /* Do the same as above, but in the opposite order. */ - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - } - } - - /* Eat whitespaces and comments to the end of the line. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - - skip_line(parser) - } - - end_mark := parser.mark - - /* Set the indentation level if it was specified. */ - indent := 0 - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - /* Scan the leading line breaks and determine the indentation level if needed. */ - var trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, - start_mark, &end_mark) { - return false - } - - /* Scan the block scalar content. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - var leading_break []byte - leading_blank := false - trailing_blank := false - for parser.mark.column == indent && !is_z(parser.buffer[parser.buffer_pos]) { - - /* - * We are at the beginning of a non-empty line. - */ - - /* Is it a trailing whitespace? */ - - trailing_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Check if we need to fold the leading line break. 
*/ - - if !literal && len(leading_break) > 0 && leading_break[0] == '\n' && - !leading_blank && !trailing_blank { - /* Do we need to join the lines by space? */ - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - leading_break = leading_break[:0] - } - - /* Append the remaining line breaks. */ - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - /* Is it a leading whitespace? */ - - leading_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Consume the current line. */ - - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - /* Eat the following indentation spaces and line breaks. */ - - if !yaml_parser_scan_block_scalar_breaks(parser, - &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - /* Chomp the tail. */ - - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - - return true -} - -/* - * Scan indentation spaces and line breaks for a block scalar. Determine the - * indentation level if needed. - */ - -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, - indent *int, breaks *[]byte, - start_mark YAML_mark_t, end_mark *YAML_mark_t) bool { - - *end_mark = parser.mark - - /* Eat the indentation spaces and line breaks. */ - max_indent := 0 - for { - /* Eat the indentation spaces. */ - - if !cache(parser, 1) { - return false - } - - for (*indent == 0 || parser.mark.column < *indent) && - is_space(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - /* Check for a tab character messing the indentation. */ - - if (*indent == 0 || parser.mark.column < *indent) && - is_tab(parser.buffer[parser.buffer_pos]) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - /* Have we found a non-empty line? */ - - if !is_break_at(parser.buffer, parser.buffer_pos) { - break - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - /* Determine the indentation level if needed. */ - - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - - return true -} - -/* - * Scan a quoted scalar. - */ - -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, - single bool) bool { - - /* Eat the left quote. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the content of the quoted scalar. */ - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - for { - /* Check that there are no document indicators at the beginning of the line. 
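The chomping indicator parsed after '|' or '>' decides what happens to the final line break and any trailing empty lines, which is exactly how the chomping variable is applied just before the token is built above. As a hypothetical helper:

func chomping_for(indicator byte) int {
	switch indicator {
	case '+':
		return +1 // keep: retain the final break and trailing empty lines
	case '-':
		return -1 // strip: drop the final break and trailing empty lines
	}
	return 0 // clip (default): keep the final break, drop trailing empty lines
}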
*/ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - /* Check for EOF. */ - - if is_z(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - /* Consume non-blank characters. */ - - if !cache(parser, 2) { - return false - } - - leading_blanks := false - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for an escaped single quote. */ - - if single && parser.buffer[parser.buffer_pos] == '\'' && - parser.buffer[parser.buffer_pos+1] == '\'' { - // It is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && - is_break_at(parser.buffer, parser.buffer_pos+1) { - - /* Check for an escaped line break. */ - if !cache(parser, 3) { - return false - } - - skip(parser) - skip_line(parser) - leading_blanks = true - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - - /* Check for an escape sequence. */ - - code_length := 0 - - /* Check the escape character. */ - - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '/': - s = append(s, '/') - case '\\': - s = append(s, '\\') - case 'N': /* NEL (#x85) */ - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': /* #xA0 */ - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': /* LS (#x2028) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': /* PS (#x2029) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - /* Consume an arbitrary escape code. */ - - if code_length > 0 { - value := 0 - - /* Scan the character value. */ - - if !cache(parser, code_length) { - return false - } - - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer[parser.buffer_pos+k]) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexadecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer[parser.buffer_pos+k]) - } - - /* Check the value and write the character.
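For orientation, the numeric escapes above consume a fixed number of hex digits after the introducer, per the code_length assignments in the switch (example_escape_code_lengths is an illustrative function):

func example_escape_code_lengths() map[byte]int {
	// \xXX -> 2 digits, \uXXXX -> 4 digits, \UXXXXXXXX -> 8 digits
	return map[byte]int{'x': 2, 'u': 4, 'U': 8}
}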
*/ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - /* Advance the pointer. */ - - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - /* It is a non-escaped non-blank character. */ - - s = read(parser, s) - } - - if !cache(parser, 2) { - return false - } - } - - /* Check if we are at the end of the scalar. */ - b := parser.buffer[parser.buffer_pos] - if single { - if b == '\'' { - break - } - } else if b == '"' { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || is_break_at(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Consume a space or a tab character. */ - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - - if !cache(parser, 1) { - return false - } - } - - /* Join the whitespaces or fold line breaks. */ - - if leading_blanks { - /* Do we need to fold line breaks? */ - - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Eat the right quote. */ - - skip(parser) - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - return true -} - -/* - * Scan a plain scalar. - */ - -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - leading_blanks := false - indent := parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - /* Consume the content of the plain scalar. */ - - for { - /* Check for a document indicator. */ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - break - } - - /* Check for a comment. */ - - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - /* Consume non-blank characters. */ - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */ - - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz_at(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - /* Check for indicators that may end a plain scalar. */ - b := parser.buffer[parser.buffer_pos] - if (b == ':' && is_blankz_at(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (b == ',' || b == ':' || - b == '?' || b == '[' || - b == ']' || b == '{' || - b == '}')) { - break - } - - /* Check if we need to join whitespaces and breaks. */ - - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - /* Do we need to fold line breaks? */ - - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Copy the character. */ - - s = read(parser, s) - end_mark = parser.mark - - if !cache(parser, 2) { - return false - } - } - - /* Is it the end? */ - - if !(is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos)) { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos) { - - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Check for tab character that abuse indentation. */ - - if leading_blanks && parser.mark.column < indent && - is_tab(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") - return false - } - - /* Consume a space or a tab character. */ - - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if !cache(parser, 1) { - return false - } - } - - /* Check indentation level. */ - - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - /* Note that we change the 'simple_key_allowed' flag. 
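Summarizing the terminator checks in the loop above: ':' followed by a blank ends a plain scalar in any context, and the flow indicators end it in the flow context. A sketch (ends_plain_scalar is hypothetical):

func ends_plain_scalar(b byte, next_blankz bool, flow_level int) bool {
	if b == ':' && next_blankz {
		return true // "key: value": the ':' starts a VALUE token
	}
	if flow_level > 0 {
		switch b {
		case ',', ':', '?', '[', ']', '{', '}':
			return true // flow indicators end the scalar
		}
	}
	return false
}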
*/ - - if leading_blanks { - parser.simple_key_allowed = true - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go deleted file mode 100644 index f153aee4..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "reflect" - "sort" - "strings" - "sync" - "unicode" -) - -// A field represents a single field found in a struct. -type field struct { - name string - tag bool - index []int - typ reflect.Type - omitEmpty bool - flow bool -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("yaml") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft, - opts.Contains("omitempty"), opts.Contains("flow")}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, field{name: ft.Name(), index: index, typ: ft}) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for _, i := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - } - v = v.Field(i) - } - return v -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, i := range index { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - t = t.Field(i).Type - } - return t -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { - av, ak := getElem(sv[i]) - bv, bk := getElem(sv[j]) - if ak == reflect.String && bk == reflect.String { - return av.String() < bv.String() - } - - return ak < bk -} - -func getElem(v reflect.Value) (reflect.Value, reflect.Kind) { - k := v.Kind() - for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() { - v = v.Elem() - k = v.Kind() - } - - return v, k -} - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go deleted file mode 100644 index a76b6336..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -/* - * Set the writer error and return 0. 
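The struct-tag grammar handled by parseTag and tagOptions.Contains above is the comma-separated form familiar from encoding/json. For a hypothetical field:

type Service struct {
	Ports []int `yaml:"ports,omitempty,flow"`
}

// parseTag("ports,omitempty,flow") returns ("ports", "omitempty,flow"),
// so opts.Contains("omitempty") and opts.Contains("flow") are both true;
// a tag of "-" is skipped entirely by typeFields.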
- */ - -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - - return false -} - -/* - * Flush the output buffer. - */ - -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("Write handler must be set") /* Write handler must be set. */ - } - if emitter.encoding == yaml_ANY_ENCODING { - panic("Encoding must be set") /* Output encoding must be set. */ - } - - /* Check if the buffer is empty. */ - - if emitter.buffer_pos == 0 { - return true - } - - /* If the output encoding is UTF-8, we don't need to recode the buffer. */ - - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, - emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - /* Recode the buffer into the raw buffer. */ - - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - - /* - * See the "reader.c" code for more details on UTF-8 encoding. Note - * that we assume that the buffer contains a valid UTF-8 sequence. - */ - - /* Read the next UTF-8 character. */ - - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - - pos += w - - /* Write the character. */ - - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - /* Write the character using a surrogate pair (check "reader.c"). */ - - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - /* Write the raw buffer. */ - - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go deleted file mode 100644 index de4c05ad..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
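The surrogate-pair arithmetic in yaml_emitter_flush above can be cross-checked against the standard library. A small standalone sketch, assuming UTF-16LE byte order (the low, high = 0, 1 case):

package main

import (
	"fmt"
	"unicode/utf16"
)

// utf16leBytes encodes a rune as UTF-16LE, the layout the emitter
// produces by hand above.
func utf16leBytes(r rune) []byte {
	var out []byte
	for _, u := range utf16.Encode([]rune{r}) {
		out = append(out, byte(u&0xFF), byte(u>>8)) // low byte first
	}
	return out
}

func main() {
	fmt.Printf("% x\n", utf16leBytes('𐐷')) // U+10437 -> 01 d8 37 dc
}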
-*/ - -package candiedyaml - -const ( - yaml_VERSION_MAJOR = 0 - yaml_VERSION_MINOR = 1 - yaml_VERSION_PATCH = 6 - yaml_VERSION_STRING = "0.1.6" -) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go deleted file mode 100644 index 2b3b7d74..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go +++ /dev/null @@ -1,891 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -const ( - INPUT_RAW_BUFFER_SIZE = 1024 - - /* - * The size of the input buffer. - * - * It should be possible to decode the whole raw buffer. - */ - INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3) - - /* - * The size of the output buffer. - */ - - OUTPUT_BUFFER_SIZE = 512 - - /* - * The size of the output raw buffer. - * - * It should be possible to encode the whole output buffer. - */ - - OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2) - - INITIAL_STACK_SIZE = 16 - INITIAL_QUEUE_SIZE = 16 -) - -func width(b byte) int { - if b&0x80 == 0 { - return 1 - } - - if b&0xE0 == 0xC0 { - return 2 - } - - if b&0xF0 == 0xE0 { - return 3 - } - - if b&0xF8 == 0xF0 { - return 4 - } - - return 0 -} - -func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) { - w := width(src[*src_pos]) - switch w { - case 4: - dest[*dest_pos+3] = src[*src_pos+3] - fallthrough - case 3: - dest[*dest_pos+2] = src[*src_pos+2] - fallthrough - case 2: - dest[*dest_pos+1] = src[*src_pos+1] - fallthrough - case 1: - dest[*dest_pos] = src[*src_pos] - default: - panic("invalid width") - } - *dest_pos += w - *src_pos += w -} - -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ - -func is_alpha(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'Z') || - (b >= 'a' && b <= 'z') || - b == '_' || b == '-' -} - -// /* -// * Check if the character at the specified position is a digit. -// */ -// -func is_digit(b byte) bool { - return b >= '0' && b <= '9' -} - -// /* -// * Get the value of a digit. -// */ -// -func as_digit(b byte) int { - return int(b) - '0' -} - -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -func is_hex(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -// -// /* -// * Get the value of a hex-digit. -// */ -// -func as_hex(b byte) int { - if b >= 'A' && b <= 'F' { - return int(b) - 'A' + 10 - } else if b >= 'a' && b <= 'f' { - return int(b) - 'a' + 10 - } - return int(b) - '0' -} - -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? 
\ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) - -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -func is_blankz_at(b []byte, i int) bool { - return is_blank(b[i]) || is_breakz_at(b, i) -} - -// /* -// * Check if the character at the specified position is a line break. -// */ -func is_break_at(b []byte, i int) bool { - return b[i] == '\r' || /* CR (#xD)*/ - b[i] == '\n' || /* LF (#xA) */ - (b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */ -} - -func is_breakz_at(b []byte, i int) bool { - return is_break_at(b, i) || is_z(b[i]) -} - -func is_crlf_at(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// /* -// * Check if the character at the specified position is NUL. -// */ -func is_z(b byte) bool { - return b == 0x0 -} - -// /* -// * Check if the character at the specified position is space. -// */ -func is_space(b byte) bool { - return b == ' ' -} - -// -// /* -// * Check if the character at the specified position is tab. -// */ -func is_tab(b byte) bool { - return b == '\t' -} - -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -func is_blank(b byte) bool { - return is_space(b) || is_tab(b) -} - -// /* -// * Check if the character is ASCII. -// */ -func is_ascii(b byte) bool { - return b <= '\x7f' -} - -// /* -// * Check if the character can be printed unescaped. -// */ -func is_printable_at(b []byte, i int) bool { - return ((b[i] == 0x0A) || /* . == #x0A */ - (b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */ - (b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */ - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && /* && . != #xFEFF */ - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - // collapse the slice - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - // move the tokens down - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - // readjust the length - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// /* -// * Check if the character at the specified position is BOM. -// */ -// -func is_bom_at(b []byte, i int) bool { - return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF -} - -// -// #ifdef HAVE_CONFIG_H -// #include -// #endif -// -// #include "./yaml.h" -// -// #include -// #include -// -// /* -// * Memory management. -// */ -// -// yaml_DECLARE(void *) -// yaml_malloc(size_t size); -// -// yaml_DECLARE(void *) -// yaml_realloc(void *ptr, size_t size); -// -// yaml_DECLARE(void) -// yaml_free(void *ptr); -// -// yaml_DECLARE(yaml_char_t *) -// yaml_strdup(const yaml_char_t *); -// -// /* -// * Reader: Ensure that the buffer contains at least `length` characters. 
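The multi-byte patterns matched byte-by-byte in is_break_at above are exactly the UTF-8 encodings of the Unicode line breaks the comments name. A small sketch that prints them:

package main

import "fmt"

func main() {
	// NEL (U+0085), LS (U+2028) and PS (U+2029), as checked by is_break_at.
	for _, r := range []rune{0x0085, 0x2028, 0x2029} {
		fmt.Printf("U+%04X -> % x\n", r, []byte(string(r)))
	}
	// U+0085 -> c2 85
	// U+2028 -> e2 80 a8
	// U+2029 -> e2 80 a9
}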
-// */ -// -// yaml_DECLARE(int) -// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length); -// -// /* -// * Scanner: Ensure that the token stack contains at least one token ready. -// */ -// -// yaml_DECLARE(int) -// yaml_parser_fetch_more_tokens(yaml_parser_t *parser); -// -// /* -// * The size of the input raw buffer. -// */ -// -// #define INPUT_RAW_BUFFER_SIZE 16384 -// -// /* -// * The size of the input buffer. -// * -// * It should be possible to decode the whole raw buffer. -// */ -// -// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3) -// -// /* -// * The size of the output buffer. -// */ -// -// #define OUTPUT_BUFFER_SIZE 16384 -// -// /* -// * The size of the output raw buffer. -// * -// * It should be possible to encode the whole output buffer. -// */ -// -// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2) -// -// /* -// * The size of other stacks and queues. -// */ -// -// #define INITIAL_STACK_SIZE 16 -// #define INITIAL_QUEUE_SIZE 16 -// #define INITIAL_STRING_SIZE 16 -// -// /* -// * Buffer management. -// */ -// -// #define BUFFER_INIT(context,buffer,size) \ -// (((buffer).start = yaml_malloc(size)) ? \ -// ((buffer).last = (buffer).pointer = (buffer).start, \ -// (buffer).end = (buffer).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define BUFFER_DEL(context,buffer) \ -// (yaml_free((buffer).start), \ -// (buffer).start = (buffer).pointer = (buffer).end = 0) -// -// /* -// * String management. -// */ -// -// typedef struct { -// yaml_char_t *start; -// yaml_char_t *end; -// yaml_char_t *pointer; -// } yaml_string_t; -// -// yaml_DECLARE(int) -// yaml_string_extend(yaml_char_t **start, -// yaml_char_t **pointer, yaml_char_t **end); -// -// yaml_DECLARE(int) -// yaml_string_join( -// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end, -// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end); -// -// #define NULL_STRING { NULL, NULL, NULL } -// -// #define STRING(string,length) { (string), (string)+(length), (string) } -// -// #define STRING_ASSIGN(value,string,length) \ -// ((value).start = (string), \ -// (value).end = (string)+(length), \ -// (value).pointer = (string)) -// -// #define STRING_INIT(context,string,size) \ -// (((string).start = yaml_malloc(size)) ? \ -// ((string).pointer = (string).start, \ -// (string).end = (string).start+(size), \ -// memset((string).start, 0, (size)), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STRING_DEL(context,string) \ -// (yaml_free((string).start), \ -// (string).start = (string).pointer = (string).end = 0) -// -// #define STRING_EXTEND(context,string) \ -// (((string).pointer+5 < (string).end) \ -// || yaml_string_extend(&(string).start, \ -// &(string).pointer, &(string).end)) -// -// #define CLEAR(context,string) \ -// ((string).pointer = (string).start, \ -// memset((string).start, 0, (string).end-(string).start)) -// -// #define JOIN(context,string_a,string_b) \ -// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \ -// &(string_a).end, &(string_b).start, \ -// &(string_b).pointer, &(string_b).end)) ? \ -// ((string_b).pointer = (string_b).start, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * String check operations. -// */ -// -// /* -// * Check the octet at the specified position. -// */ -// -// #define CHECK_AT(string,octet,offset) \ -// ((string).pointer[offset] == (yaml_char_t)(octet)) -// -// /* -// * Check the current octet in the buffer. 
-// */ -// -// #define CHECK(string,octet) CHECK_AT((string),(octet),0) -// -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ -// -// #define IS_ALPHA_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'Z') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'z') || \ -// (string).pointer[offset] == '_' || \ -// (string).pointer[offset] == '-') -// -// #define IS_ALPHA(string) IS_ALPHA_AT((string),0) -// -// /* -// * Check if the character at the specified position is a digit. -// */ -// -// #define IS_DIGIT_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9')) -// -// #define IS_DIGIT(string) IS_DIGIT_AT((string),0) -// -// /* -// * Get the value of a digit. -// */ -// -// #define AS_DIGIT_AT(string,offset) \ -// ((string).pointer[offset] - (yaml_char_t) '0') -// -// #define AS_DIGIT(string) AS_DIGIT_AT((string),0) -// -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -// #define IS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f')) -// -// #define IS_HEX(string) IS_HEX_AT((string),0) -// -// /* -// * Get the value of a hex-digit. -// */ -// -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) -// -// #define AS_HEX(string) AS_HEX_AT((string),0) -// -// /* -// * Check if the character is ASCII. -// */ -// -// #define IS_ASCII_AT(string,offset) \ -// ((string).pointer[offset] <= (yaml_char_t) '\x7F') -// -// #define IS_ASCII(string) IS_ASCII_AT((string),0) -// -// /* -// * Check if the character can be printed unescaped. -// */ -// -// #define IS_PRINTABLE_AT(string,offset) \ -// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \ -// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \ -// && (string).pointer[offset] <= 0x7E) \ -// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \ -// && (string).pointer[offset+1] >= 0xA0) \ -// || ((string).pointer[offset] > 0xC2 \ -// && (string).pointer[offset] < 0xED) \ -// || ((string).pointer[offset] == 0xED \ -// && (string).pointer[offset+1] < 0xA0) \ -// || ((string).pointer[offset] == 0xEE) \ -// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \ -// && !((string).pointer[offset+1] == 0xBB /* && . 
!= #xFEFF */ \ -// && (string).pointer[offset+2] == 0xBF) \ -// && !((string).pointer[offset+1] == 0xBF \ -// && ((string).pointer[offset+2] == 0xBE \ -// || (string).pointer[offset+2] == 0xBF)))) -// -// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0) -// -// /* -// * Check if the character at the specified position is NUL. -// */ -// -// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset)) -// -// #define IS_Z(string) IS_Z_AT((string),0) -// -// /* -// * Check if the character at the specified position is BOM. -// */ -// -// #define IS_BOM_AT(string,offset) \ -// (CHECK_AT((string),'\xEF',(offset)) \ -// && CHECK_AT((string),'\xBB',(offset)+1) \ -// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */ -// -// #define IS_BOM(string) IS_BOM_AT(string,0) -// -// /* -// * Check if the character at the specified position is space. -// */ -// -// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset)) -// -// #define IS_SPACE(string) IS_SPACE_AT((string),0) -// -// /* -// * Check if the character at the specified position is tab. -// */ -// -// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset)) -// -// #define IS_TAB(string) IS_TAB_AT((string),0) -// -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -// -// #define IS_BLANK_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset))) -// -// #define IS_BLANK(string) IS_BLANK_AT((string),0) -// -// /* -// * Check if the character at the specified position is a line break. -// */ -// -// #define IS_BREAK_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \ -// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \ -// || (CHECK_AT((string),'\xC2',(offset)) \ -// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */ -// -// #define IS_BREAK(string) IS_BREAK_AT((string),0) -// -// #define IS_CRLF_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1)) -// -// #define IS_CRLF(string) IS_CRLF_AT((string),0) -// -// /* -// * Check if the character is a line break or NUL. -// */ -// -// #define IS_BREAKZ_AT(string,offset) \ -// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset))) -// -// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, or NUL. -// */ -// -// #define IS_SPACEZ_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -// -// #define IS_BLANKZ_AT(string,offset) \ -// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0) -// -// /* -// * Determine the width of the character. -// */ -// -// #define WIDTH_AT(string,offset) \ -// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \ -// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \ -// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \ -// ((string).pointer[offset] & 0xF8) == 0xF0 ? 
4 : 0) -// -// #define WIDTH(string) WIDTH_AT((string),0) -// -// /* -// * Move the string pointer to the next character. -// */ -// -// #define MOVE(string) ((string).pointer += WIDTH((string))) -// -// /* -// * Copy a character and move the pointers of both strings. -// */ -// -// #define COPY(string_a,string_b) \ -// ((*(string_b).pointer & 0x80) == 0x00 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xE0) == 0xC0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF0) == 0xE0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF8) == 0xF0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : 0) -// -// /* -// * Stack and queue management. -// */ -// -// yaml_DECLARE(int) -// yaml_stack_extend(void **start, void **top, void **end); -// -// yaml_DECLARE(int) -// yaml_queue_extend(void **start, void **head, void **tail, void **end); -// -// #define STACK_INIT(context,stack,size) \ -// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \ -// ((stack).top = (stack).start, \ -// (stack).end = (stack).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STACK_DEL(context,stack) \ -// (yaml_free((stack).start), \ -// (stack).start = (stack).top = (stack).end = 0) -// -// #define STACK_EMPTY(context,stack) \ -// ((stack).start == (stack).top) -// -// #define PUSH(context,stack,value) \ -// (((stack).top != (stack).end \ -// || yaml_stack_extend((void **)&(stack).start, \ -// (void **)&(stack).top, (void **)&(stack).end)) ? \ -// (*((stack).top++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define POP(context,stack) \ -// (*(--(stack).top)) -// -// #define QUEUE_INIT(context,queue,size) \ -// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \ -// ((queue).head = (queue).tail = (queue).start, \ -// (queue).end = (queue).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define QUEUE_DEL(context,queue) \ -// (yaml_free((queue).start), \ -// (queue).start = (queue).head = (queue).tail = (queue).end = 0) -// -// #define QUEUE_EMPTY(context,queue) \ -// ((queue).head == (queue).tail) -// -// #define ENQUEUE(context,queue,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (*((queue).tail++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define DEQUEUE(context,queue) \ -// (*((queue).head++)) -// -// #define QUEUE_INSERT(context,queue,index,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (memmove((queue).head+(index)+1,(queue).head+(index), \ -// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \ -// *((queue).head+(index)) = value, \ -// (queue).tail++, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * Token initializers. 
-// */ -// -// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \ -// (memset(&(token), 0, sizeof(yaml_token_t)), \ -// (token).type = (token_type), \ -// (token).start_mark = (token_start_mark), \ -// (token).end_mark = (token_end_mark)) -// -// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \ -// (token).data.stream_start.encoding = (token_encoding)) -// -// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark))) -// -// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \ -// (token).data.alias.value = (token_value)) -// -// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.anchor.value = (token_value)) -// -// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag.handle = (token_handle), \ -// (token).data.tag.suffix = (token_suffix)) -// -// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.scalar.value = (token_value), \ -// (token).data.scalar.length = (token_length), \ -// (token).data.scalar.style = (token_style)) -// -// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.version_directive.major = (token_major), \ -// (token).data.version_directive.minor = (token_minor)) -// -// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag_directive.handle = (token_handle), \ -// (token).data.tag_directive.prefix = (token_prefix)) -// -// /* -// * Event initializers. 
-// */ -// -// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \ -// (memset(&(event), 0, sizeof(yaml_event_t)), \ -// (event).type = (event_type), \ -// (event).start_mark = (event_start_mark), \ -// (event).end_mark = (event_end_mark)) -// -// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.stream_start.encoding = (event_encoding)) -// -// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark))) -// -// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \ -// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_start.version_directive = (event_version_directive), \ -// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \ -// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \ -// (event).data.document_start.implicit = (event_implicit)) -// -// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_end.implicit = (event_implicit)) -// -// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \ -// (event).data.alias.anchor = (event_anchor)) -// -// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \ -// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \ -// (event).data.scalar.anchor = (event_anchor), \ -// (event).data.scalar.tag = (event_tag), \ -// (event).data.scalar.value = (event_value), \ -// (event).data.scalar.length = (event_length), \ -// (event).data.scalar.plain_implicit = (event_plain_implicit), \ -// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \ -// (event).data.scalar.style = (event_style)) -// -// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.sequence_start.anchor = (event_anchor), \ -// (event).data.sequence_start.tag = (event_tag), \ -// (event).data.sequence_start.implicit = (event_implicit), \ -// (event).data.sequence_start.style = (event_style)) -// -// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark))) -// -// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.mapping_start.anchor = (event_anchor), \ -// (event).data.mapping_start.tag = (event_tag), \ -// (event).data.mapping_start.implicit = (event_implicit), \ -// (event).data.mapping_start.style = (event_style)) -// -// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark))) -// -// /* -// * Document initializer. 
-// */ -// -// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \ -// document_version_directive,document_tag_directives_start, \ -// document_tag_directives_end,document_start_implicit, \ -// document_end_implicit,document_start_mark,document_end_mark) \ -// (memset(&(document), 0, sizeof(yaml_document_t)), \ -// (document).nodes.start = (document_nodes_start), \ -// (document).nodes.end = (document_nodes_end), \ -// (document).nodes.top = (document_nodes_start), \ -// (document).version_directive = (document_version_directive), \ -// (document).tag_directives.start = (document_tag_directives_start), \ -// (document).tag_directives.end = (document_tag_directives_end), \ -// (document).start_implicit = (document_start_implicit), \ -// (document).end_implicit = (document_end_implicit), \ -// (document).start_mark = (document_start_mark), \ -// (document).end_mark = (document_end_mark)) -// -// /* -// * Node initializers. -// */ -// -// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \ -// (memset(&(node), 0, sizeof(yaml_node_t)), \ -// (node).type = (node_type), \ -// (node).tag = (node_tag), \ -// (node).start_mark = (node_start_mark), \ -// (node).end_mark = (node_end_mark)) -// -// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.scalar.value = (node_value), \ -// (node).data.scalar.length = (node_length), \ -// (node).data.scalar.style = (node_style)) -// -// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.sequence.items.start = (node_items_start), \ -// (node).data.sequence.items.end = (node_items_end), \ -// (node).data.sequence.items.top = (node_items_start), \ -// (node).data.sequence.style = (node_style)) -// -// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.mapping.pairs.start = (node_pairs_start), \ -// (node).data.mapping.pairs.end = (node_pairs_end), \ -// (node).data.mapping.pairs.top = (node_pairs_start), \ -// (node).data.mapping.style = (node_style)) -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go deleted file mode 100644 index d608dbb3..00000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go +++ /dev/null @@ -1,953 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "io" -) - -/** The version directive data. */ -type yaml_version_directive_t struct { - major int // The major version number - minor int // The minor version number -} - -/** The tag directive data. 
*/ -type yaml_tag_directive_t struct { - handle []byte // The tag handle - prefix []byte // The tag prefix -} - -/** The stream encoding. */ -type yaml_encoding_t int - -const ( - /** Let the parser choose the encoding. */ - yaml_ANY_ENCODING yaml_encoding_t = iota - /** The default UTF-8 encoding. */ - yaml_UTF8_ENCODING - /** The UTF-16-LE encoding with BOM. */ - yaml_UTF16LE_ENCODING - /** The UTF-16-BE encoding with BOM. */ - yaml_UTF16BE_ENCODING -) - -/** Line break types. */ -type yaml_break_t int - -const ( - yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */ - yaml_CR_BREAK /** Use CR for line breaks (Mac style). */ - yaml_LN_BREAK /** Use LN for line breaks (Unix style). */ - yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */ -) - -/** Many bad things could happen with the parser and emitter. */ -type YAML_error_type_t int - -const ( - /** No error is produced. */ - yaml_NO_ERROR YAML_error_type_t = iota - - /** Cannot allocate or reallocate a block of memory. */ - yaml_MEMORY_ERROR - - /** Cannot read or decode the input stream. */ - yaml_READER_ERROR - /** Cannot scan the input stream. */ - yaml_SCANNER_ERROR - /** Cannot parse the input stream. */ - yaml_PARSER_ERROR - /** Cannot compose a YAML document. */ - yaml_COMPOSER_ERROR - - /** Cannot write to the output stream. */ - yaml_WRITER_ERROR - /** Cannot emit a YAML stream. */ - yaml_EMITTER_ERROR -) - -/** The pointer position. */ -type YAML_mark_t struct { - /** The position index. */ - index int - - /** The position line. */ - line int - - /** The position column. */ - column int -} - -func (m YAML_mark_t) String() string { - return fmt.Sprintf("line %d, column %d", m.line, m.column) -} - -/** @} */ - -/** - * @defgroup styles Node Styles - * @{ - */ - -type yaml_style_t int - -/** Scalar styles. */ -type yaml_scalar_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - /** The plain scalar style. */ - yaml_PLAIN_SCALAR_STYLE - - /** The single-quoted scalar style. */ - yaml_SINGLE_QUOTED_SCALAR_STYLE - /** The double-quoted scalar style. */ - yaml_DOUBLE_QUOTED_SCALAR_STYLE - - /** The literal scalar style. */ - yaml_LITERAL_SCALAR_STYLE - /** The folded scalar style. */ - yaml_FOLDED_SCALAR_STYLE -) - -/** Sequence styles. */ -type yaml_sequence_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - /** The block sequence style. */ - yaml_BLOCK_SEQUENCE_STYLE - /** The flow sequence style. */ - yaml_FLOW_SEQUENCE_STYLE -) - -/** Mapping styles. */ -type yaml_mapping_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - /** The block mapping style. */ - yaml_BLOCK_MAPPING_STYLE - /** The flow mapping style. */ - yaml_FLOW_MAPPING_STYLE - -/* yaml_FLOW_SET_MAPPING_STYLE */ -) - -/** @} */ - -/** - * @defgroup tokens Tokens - * @{ - */ - -/** Token types. */ -type yaml_token_type_t int - -const ( - /** An empty token. */ - yaml_NO_TOKEN yaml_token_type_t = iota - - /** A STREAM-START token. */ - yaml_STREAM_START_TOKEN - /** A STREAM-END token. */ - yaml_STREAM_END_TOKEN - - /** A VERSION-DIRECTIVE token. */ - yaml_VERSION_DIRECTIVE_TOKEN - /** A TAG-DIRECTIVE token. */ - yaml_TAG_DIRECTIVE_TOKEN - /** A DOCUMENT-START token. */ - yaml_DOCUMENT_START_TOKEN - /** A DOCUMENT-END token. */ - yaml_DOCUMENT_END_TOKEN - - /** A BLOCK-SEQUENCE-START token.
*/ - yaml_BLOCK_SEQUENCE_START_TOKEN - /** A BLOCK-MAPPING-START token. */ - yaml_BLOCK_MAPPING_START_TOKEN - /** A BLOCK-END token. */ - yaml_BLOCK_END_TOKEN - - /** A FLOW-SEQUENCE-START token. */ - yaml_FLOW_SEQUENCE_START_TOKEN - /** A FLOW-SEQUENCE-END token. */ - yaml_FLOW_SEQUENCE_END_TOKEN - /** A FLOW-MAPPING-START token. */ - yaml_FLOW_MAPPING_START_TOKEN - /** A FLOW-MAPPING-END token. */ - yaml_FLOW_MAPPING_END_TOKEN - - /** A BLOCK-ENTRY token. */ - yaml_BLOCK_ENTRY_TOKEN - /** A FLOW-ENTRY token. */ - yaml_FLOW_ENTRY_TOKEN - /** A KEY token. */ - yaml_KEY_TOKEN - /** A VALUE token. */ - yaml_VALUE_TOKEN - - /** An ALIAS token. */ - yaml_ALIAS_TOKEN - /** An ANCHOR token. */ - yaml_ANCHOR_TOKEN - /** A TAG token. */ - yaml_TAG_TOKEN - /** A SCALAR token. */ - yaml_SCALAR_TOKEN -) - -/** The token structure. */ -type yaml_token_t struct { - - /** The token type. */ - token_type yaml_token_type_t - - /** The token data. */ - /** The stream start (for @c yaml_STREAM_START_TOKEN). */ - encoding yaml_encoding_t - - /** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN). */ - /** The anchor (for @c ). */ - /** The scalar value (for @c ). */ - value []byte - - /** The tag suffix. */ - suffix []byte - - /** The scalar value (for @c yaml_SCALAR_TOKEN). */ - /** The scalar style. */ - style yaml_scalar_style_t - - /** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */ - version_directive yaml_version_directive_t - - /** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */ - prefix []byte - - /** The beginning of the token. */ - start_mark YAML_mark_t - /** The end of the token. */ - end_mark YAML_mark_t - - major, minor int -} - -/** - * @defgroup events Events - * @{ - */ - -/** Event types. */ -type yaml_event_type_t int - -const ( - /** An empty event. */ - yaml_NO_EVENT yaml_event_type_t = iota - - /** A STREAM-START event. */ - yaml_STREAM_START_EVENT - /** A STREAM-END event. */ - yaml_STREAM_END_EVENT - - /** A DOCUMENT-START event. */ - yaml_DOCUMENT_START_EVENT - /** A DOCUMENT-END event. */ - yaml_DOCUMENT_END_EVENT - - /** An ALIAS event. */ - yaml_ALIAS_EVENT - /** A SCALAR event. */ - yaml_SCALAR_EVENT - - /** A SEQUENCE-START event. */ - yaml_SEQUENCE_START_EVENT - /** A SEQUENCE-END event. */ - yaml_SEQUENCE_END_EVENT - - /** A MAPPING-START event. */ - yaml_MAPPING_START_EVENT - /** A MAPPING-END event. */ - yaml_MAPPING_END_EVENT -) - -/** The event structure. */ -type yaml_event_t struct { - - /** The event type. */ - event_type yaml_event_type_t - - /** The stream parameters (for @c yaml_STREAM_START_EVENT). */ - encoding yaml_encoding_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */ - version_directive *yaml_version_directive_t - - /** The beginning and end of the tag directives list. */ - tag_directives []yaml_tag_directive_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** Is the document indicator implicit? */ - implicit bool - - /** The alias parameters (for @c yaml_ALIAS_EVENT, yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The anchor. */ - anchor []byte - - /** The scalar parameters (for @c yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The tag. */ - tag []byte - /** The scalar value. */ - value []byte - - /** Is the tag optional for the plain style?
*/ - plain_implicit bool - /** Is the tag optional for any non-plain style? */ - quoted_implicit bool - - /** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The sequence style. */ - /** The scalar style. */ - style yaml_style_t - - /** The beginning and end of the event. */ - start_mark, end_mark YAML_mark_t -} - -/** - * @defgroup nodes Nodes - * @{ - */ - -const ( - /** The tag @c !!null with the only possible value: @c null. */ - yaml_NULL_TAG = "tag:yaml.org,2002:null" - /** The tag @c !!bool with the values: @c true and @c false. */ - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" - /** The tag @c !!str for string values. */ - yaml_STR_TAG = "tag:yaml.org,2002:str" - /** The tag @c !!int for integer values. */ - yaml_INT_TAG = "tag:yaml.org,2002:int" - /** The tag @c !!float for float values. */ - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" - /** The tag @c !!timestamp for date and time values. */ - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" - - /** The tag @c !!seq is used to denote sequences. */ - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" - /** The tag @c !!map is used to denote mappings. */ - yaml_MAP_TAG = "tag:yaml.org,2002:map" - - /** The default scalar tag is @c !!str. */ - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG - /** The default sequence tag is @c !!seq. */ - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG - /** The default mapping tag is @c !!map. */ - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG - - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" -) - -/** Node types. */ -type yaml_node_type_t int - -const ( - /** An empty node. */ - yaml_NO_NODE yaml_node_type_t = iota - - /** A scalar node. */ - yaml_SCALAR_NODE - /** A sequence node. */ - yaml_SEQUENCE_NODE - /** A mapping node. */ - yaml_MAPPING_NODE -) - -/** An element of a sequence node. */ -type yaml_node_item_t int - -/** An element of a mapping node. */ -type yaml_node_pair_t struct { - /** The key of the element. */ - key int - /** The value of the element. */ - value int -} - -/** The node structure. */ -type yaml_node_t struct { - - /** The node type. */ - node_type yaml_node_type_t - - /** The node tag. */ - tag []byte - - /** The scalar parameters (for @c yaml_SCALAR_NODE). */ - scalar struct { - /** The scalar value. */ - value []byte - /** The scalar style. */ - style yaml_scalar_style_t - } - - /** The sequence parameters (for @c yaml_SEQUENCE_NODE). */ - sequence struct { - /** The stack of sequence items. */ - items []yaml_node_item_t - /** The sequence style. */ - style yaml_sequence_style_t - } - - /** The mapping parameters (for @c yaml_MAPPING_NODE). */ - mapping struct { - /** The stack of mapping pairs (key, value). */ - pairs []yaml_node_pair_t - /** The mapping style. */ - style yaml_mapping_style_t - } - - /** The beginning of the node. */ - start_mark YAML_mark_t - /** The end of the node. */ - end_mark YAML_mark_t -} - -/** The document structure. */ -type yaml_document_t struct { - - /** The document nodes. */ - nodes []yaml_node_t - - /** The version directive. */ - version_directive *yaml_version_directive_t - - /** The list of tag directives. */ - tags []yaml_tag_directive_t - - /** Is the document start indicator implicit? */ - start_implicit bool - /** Is the document end indicator implicit? */ - end_implicit bool - - /** The beginning of the document. */ - start_mark YAML_mark_t - /** The end of the document. */ - end_mark YAML_mark_t -} - -/** - * The prototype of a read handler.
- * - * The read handler is called when the parser needs to read more bytes from the - * source. The handler should write not more than @a size bytes to the @a - * buffer. The number of written bytes should be set to the @a size_read variable. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_parser_set_input(). - * @param[out] buffer The buffer to write the data from the source. - * @param[in] size The size of the buffer. - * @param[out] size_read The actual number of bytes read from the source. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. On EOF, the handler should set the - * @a size_read to @c 0 and return @c 1. - */ - -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -/** - * This structure holds information about a potential simple key. - */ - -type yaml_simple_key_t struct { - /** Is a simple key possible? */ - possible bool - - /** Is a simple key required? */ - required bool - - /** The number of the token. */ - token_number int - - /** The position mark. */ - mark YAML_mark_t -} - -/** - * The states of the parser. - */ -type yaml_parser_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - /** Expect the beginning of an implicit document. */ - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - /** Expect DOCUMENT-START. */ - yaml_PARSE_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_PARSE_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_PARSE_DOCUMENT_END_STATE - /** Expect a block node. */ - yaml_PARSE_BLOCK_NODE_STATE - /** Expect a block node or indentless sequence. */ - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE - /** Expect a flow node. */ - yaml_PARSE_FLOW_NODE_STATE - /** Expect the first entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - /** Expect an entry of an indentless sequence. */ - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - /** Expect the first key of a block mapping. */ - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect a block mapping key. */ - yaml_PARSE_BLOCK_MAPPING_KEY_STATE - /** Expect a block mapping value. */ - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - /** Expect the first entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - /** Expect a key of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - /** Expect a value of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - /** Expect the end of an ordered mapping entry. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - /** Expect the first key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_KEY_STATE - /** Expect a value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_VALUE_STATE - /** Expect an empty value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE - /** Expect nothing. */ - yaml_PARSE_END_STATE -) - -/** - * This structure holds alias data. - */ - -type yaml_alias_data_t struct { - /** The anchor. */ - anchor []byte - /** The node id. */ - index int - /** The anchor mark. */ - mark YAML_mark_t -} - -/** - * The parser structure. - * - * All members are internal.
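In the Go port, the C contract described above (return 1 or 0, with a size_read out-parameter) becomes the (n, err) pair of yaml_read_handler_t. A minimal sketch of a conforming handler over an io.Reader (the constructor name is hypothetical; per the contract above, EOF is reported as a successful zero-byte read):

// reader_read_handler adapts an io.Reader to the read-handler shape.
func reader_read_handler(r io.Reader) yaml_read_handler_t {
	return func(parser *yaml_parser_t, buffer []byte) (int, error) {
		n, err := r.Read(buffer)
		if err == io.EOF {
			return n, nil // EOF is signaled by a zero-byte read, not an error
		}
		return n, err
	}
}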
Manage the structure using the @c yaml_parser_ - * family of functions. - */ - -type yaml_parser_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - /** The byte about which the problem occured. */ - problem_offset int - /** The problematic value (@c -1 is none). */ - problem_value int - /** The problem position. */ - problem_mark YAML_mark_t - /** The error context. */ - context string - /** The context position. */ - context_mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Reader stuff - * @{ - */ - - /** Read handler. */ - read_handler yaml_read_handler_t - - /** Reader input data. */ - input_reader io.Reader - input []byte - input_pos int - - /** EOF flag */ - eof bool - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /* The number of unread characters in the buffer. */ - unread int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The input encoding. */ - encoding yaml_encoding_t - - /** The offset of the current position (in bytes). */ - offset int - - /** The mark of the current position. */ - mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Scanner stuff - * @{ - */ - - /** Have we started to scan the input stream? */ - stream_start_produced bool - - /** Have we reached the end of the input stream? */ - stream_end_produced bool - - /** The number of unclosed '[' and '{' indicators. */ - flow_level int - - /** The tokens queue. */ - tokens []yaml_token_t - tokens_head int - - /** The number of tokens fetched from the queue. */ - tokens_parsed int - - /* Does the tokens queue contain a token ready for dequeueing. */ - token_available bool - - /** The indentation levels stack. */ - indents []int - - /** The current indentation level. */ - indent int - - /** May a simple key occur at the current position? */ - simple_key_allowed bool - - /** The stack of simple keys. */ - simple_keys []yaml_simple_key_t - - /** - * @} - */ - - /** - * @name Parser stuff - * @{ - */ - - /** The parser states stack. */ - states []yaml_parser_state_t - - /** The current parser state. */ - state yaml_parser_state_t - - /** The stack of marks. */ - marks []YAML_mark_t - - /** The list of TAG directives. */ - tag_directives []yaml_tag_directive_t - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** The alias data. */ - aliases []yaml_alias_data_t - - /** The currently parsed document. */ - document *yaml_document_t - - /** - * @} - */ - -} - -/** - * The prototype of a write handler. - * - * The write handler is called when the emitter needs to flush the accumulated - * characters to the output. The handler should write @a size bytes of the - * @a buffer to the output. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_emitter_set_output(). - * @param[in] buffer The buffer with bytes to be written. - * @param[in] size The size of the buffer. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. - */ - -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -/** The emitter states. */ -type yaml_emitter_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - /** Expect the first DOCUMENT-START or STREAM-END. */ - yaml_EMIT_FIRST_DOCUMENT_START_STATE - /** Expect DOCUMENT-START or STREAM-END. 
*/ - yaml_EMIT_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_EMIT_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_EMIT_DOCUMENT_END_STATE - /** Expect the first item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE - /** Expect the first key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_KEY_STATE - /** Expect a value for a simple key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_VALUE_STATE - /** Expect the first item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE - /** Expect the first key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect the key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_KEY_STATE - /** Expect a value for a simple key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE - /** Expect nothing. */ - yaml_EMIT_END_STATE -) - -/** - * The emitter structure. - * - * All members are internal. Manage the structure using the @c yaml_emitter_ - * family of functions. - */ - -type yaml_emitter_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - - /** - * @} - */ - - /** - * @name Writer stuff - * @{ - */ - - /** Write handler. */ - write_handler yaml_write_handler_t - - /** Standard (string or file) output data. */ - output_buffer *[]byte - output_writer io.Writer - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The stream encoding. */ - encoding yaml_encoding_t - - /** - * @} - */ - - /** - * @name Emitter stuff - * @{ - */ - - /** If the output is in the canonical style? */ - canonical bool - /** The number of indentation spaces. */ - best_indent int - /** The preferred width of the output lines. */ - best_width int - /** Allow unescaped non-ASCII characters? */ - unicode bool - /** The preferred line break. */ - line_break yaml_break_t - - /** The stack of states. */ - states []yaml_emitter_state_t - - /** The current emitter state. */ - state yaml_emitter_state_t - - /** The event queue. */ - events []yaml_event_t - events_head int - - /** The stack of indentation levels. */ - indents []int - - /** The list of tag directives. */ - tag_directives []yaml_tag_directive_t - - /** The current indentation level. */ - indent int - - /** The current flow level. */ - flow_level int - - /** Is it the document root context? */ - root_context bool - /** Is it a sequence context? */ - sequence_context bool - /** Is it a mapping context? */ - mapping_context bool - /** Is it a simple mapping key context? */ - simple_key_context bool - - /** The current line. */ - line int - /** The current column. */ - column int - /** If the last character was a whitespace? */ - whitespace bool - /** If the last character was an indentation character (' ', '-', '?', ':')? */ - indention bool - /** If an explicit document end is required? */ - open_ended bool - - /** Anchor analysis. */ - anchor_data struct { - /** The anchor value. */ - anchor []byte - /** Is it an alias? 
*/ - alias bool - } - - /** Tag analysis. */ - tag_data struct { - /** The tag handle. */ - handle []byte - /** The tag suffix. */ - suffix []byte - } - - /** Scalar analysis. */ - scalar_data struct { - /** The scalar value. */ - value []byte - /** Does the scalar contain line breaks? */ - multiline bool - /** Can the scalar be expessed in the flow plain style? */ - flow_plain_allowed bool - /** Can the scalar be expressed in the block plain style? */ - block_plain_allowed bool - /** Can the scalar be expressed in the single quoted style? */ - single_quoted_allowed bool - /** Can the scalar be expressed in the literal or folded styles? */ - block_allowed bool - /** The output style. */ - style yaml_scalar_style_t - } - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** If the stream was already opened? */ - opened bool - /** If the stream was already closed? */ - closed bool - - /** The information associated with the document nodes. */ - anchors *struct { - /** The number of references. */ - references int - /** The anchor id. */ - anchor int - /** If the node has been emitted? */ - serialized bool - } - - /** The last assigned anchor id. */ - last_anchor_id int - - /** The currently emitted document. */ - document *yaml_document_t - - /** - * @} - */ - -} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index 77115771..bafa775d 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -21,9 +21,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -798,23 +798,23 @@ var ( ) var fileDescriptorAuth = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, - 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x87, 0x8b, 0x25, 0xb4, 0x38, 0xb5, - 0x48, 0x48, 0x88, 0x8b, 0x25, 0x2f, 0x31, 0x37, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x27, 0x08, - 0xcc, 0x16, 0x92, 0xe2, 0xe2, 0x28, 0x48, 0x2c, 0x2e, 0x2e, 0xcf, 0x2f, 0x4a, 0x91, 0x60, 0x02, - 0x8b, 0xc3, 0xf9, 0x42, 0x22, 0x5c, 0xac, 0x45, 0xf9, 0x39, 0xa9, 0xc5, 0x12, 0xcc, 0x0a, 0xcc, - 0x1a, 0x9c, 0x41, 0x10, 0x8e, 0xd2, 0x1c, 0x46, 0x2e, 0xae, 0x80, 0xd4, 0xa2, 0xdc, 0xcc, 0xe2, - 0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x63, 0xa0, 0x01, 0x40, 0x5e, 0x48, 0x65, 0x01, 0xc4, 0x60, 0x3e, - 0x23, 0x71, 0x3d, 0x88, 0x6b, 0xf4, 0x10, 0xaa, 0xf4, 0x40, 0xd2, 0x41, 0x70, 0x85, 0x42, 0x02, - 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x50, 0x0b, 0x41, 0x4c, 0x21, 0x69, 0x2e, 0xce, 0xa2, 0xc4, 0xbc, - 0xf4, 0xd4, 0xf8, 0xd4, 0xbc, 0x14, 0xa0, 0x7d, 0x60, 0x87, 0x80, 0x05, 0x5c, 0xf3, 0x52, 0x94, - 0xb4, 0xb8, 0x58, 0xc0, 0xda, 0x38, 0xb8, 0x58, 0x82, 0x5c, 0x1d, 0x5d, 0x04, 0x18, 0x84, 0x38, - 0xb9, 0x58, 0xc3, 0x83, 0x3c, 0x43, 0x5c, 0x05, 0x18, 0x85, 0x78, 0xb9, 0x38, 0x41, 0x82, 0x10, - 0x2e, 0x93, 0x52, 0x08, 0x50, 0x0d, 0xd0, 0x9d, 0x58, 0x3d, 0x6b, 0xc1, 0xc5, 0x0b, 0xb4, 0x0b, - 0xe1, 0x2c, 0xa0, 0x03, 0x98, 0x35, 0xb8, 0x8d, 0x84, 0x30, 0x1d, 0x1c, 0x84, 0xaa, 0xd0, 0x49, - 0xe2, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x40, 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x05, 0x20, 0x7e, 0x00, - 0xc4, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xd8, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, - 
0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, + 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, + 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, + 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, + 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, + 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, + 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, + 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, + 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, + 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, + 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, + 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, + 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, + 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, + 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, + 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, + 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go index b74b9e0a..4a6c41a7 100644 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -272,6 +272,10 @@ type Response struct { // Index holds the cluster-level index at the time the Response was generated. // This index is not tied to the Node(s) contained in this Response. Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. 
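The new ClusterID field declared just below is filled in from the X-Etcd-Cluster-ID response header; the assignment appears further down in this hunk, in unmarshalSuccessfulKeysResponse. A minimal standalone sketch of that plumbing (the helper name fillClusterID and the sample ID are illustrative, not part of the patch):

package main

import (
	"fmt"
	"net/http"
)

// Response mirrors just the field this hunk adds to client.Response.
type Response struct {
	ClusterID string
}

// fillClusterID copies the cluster ID reported by the server out of the
// HTTP response headers, as the patched unmarshal path does.
func fillClusterID(res *Response, header http.Header) {
	res.ClusterID = header.Get("X-Etcd-Cluster-ID")
}

func main() {
	h := http.Header{}
	h.Set("X-Etcd-Cluster-ID", "7e27652122e8b2ae") // sample value only
	var res Response
	fillClusterID(&res, h)
	fmt.Println(res.ClusterID)
}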
+ ClusterID string `json:"-"` } type Node struct { @@ -665,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response return nil, err } } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") return &res, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index 9d981cfb..b995bce8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -116,12 +116,12 @@ func NewAuth(c *Client) Auth { } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) return (*AuthDisableResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go index 011f6cf0..0fef9c54 100644 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ b/vendor/github.com/coreos/etcd/clientv3/balancer.go @@ -21,8 +21,14 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. +var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") + // simpleBalancer does the bare minimum to expose multiple eps // to the grpc reconnection code path type simpleBalancer struct { @@ -72,7 +78,7 @@ func newSimpleBalancer(eps []string) *simpleBalancer { return sb } -func (b *simpleBalancer) Start(target string) error { return nil } +func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } func (b *simpleBalancer) ConnectNotify() <-chan struct{} { b.mu.Lock() @@ -162,6 +168,25 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) { func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { var addr string + + // If opts.BlockingWait is false (for fail-fast RPCs), it should return + // an address it has notified via Notify immediately instead of blocking. 
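The fail-fast branch that follows gives non-blocking RPCs an immediate answer from the balancer's current state instead of waiting for an endpoint to come up. A reduced sketch of the same decision; the names balancer, get, and errNoAddrAvailable are local to this sketch (the vendored error keeps its original ErrNoAddrAvilable spelling), and the blocking path is elided:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	errNoAddrAvailable   = errors.New("there is no address available")
	errClientConnClosing = errors.New("grpc: the client connection is closing")
)

type balancer struct {
	mu      sync.RWMutex
	closed  bool
	pinAddr string // currently pinned endpoint, if any
	upEps   int    // number of endpoints reported up
}

func (b *balancer) get(blockingWait bool) (string, error) {
	if !blockingWait {
		// answer from a snapshot of current state; never block
		b.mu.RLock()
		defer b.mu.RUnlock()
		if b.closed {
			return "", errClientConnClosing
		}
		if b.upEps == 0 {
			return "", errNoAddrAvailable
		}
		return b.pinAddr, nil
	}
	// the blocking path (waiting on the up channel) is elided in this sketch
	return "", nil
}

func main() {
	b := &balancer{}
	if _, err := b.get(false); err != nil {
		fmt.Println(err) // there is no address available
	}
}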
+ if !opts.BlockingWait { + b.mu.RLock() + closed := b.closed + addr = b.pinAddr + upEps := len(b.upEps) + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + + if upEps == 0 { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + for { b.mu.RLock() ch := b.upc diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index d8b04a43..a0c3f3c2 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -18,14 +18,13 @@ import ( "crypto/tls" "errors" "fmt" - "io/ioutil" - "log" "net" "net/url" "strings" "time" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "golang.org/x/net/context" "google.golang.org/grpc" @@ -88,6 +87,7 @@ func NewFromConfigFile(path string) (*Client, error) { // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() + c.Watcher.Close() return toErr(c.ctx, c.conn.Close()) } @@ -151,14 +151,14 @@ func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...str }, nil } -func parseEndpoint(endpoint string) (proto string, host string, scheme bool) { +func parseEndpoint(endpoint string) (proto string, host string, scheme string) { proto = "tcp" host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { return } - scheme = true + scheme = url.Scheme // strip scheme:// prefix since grpc dials by host host = url.Host @@ -172,9 +172,9 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme bool) { return } -func (c *Client) processCreds(protocol string) (creds *credentials.TransportCredentials) { +func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { creds = c.creds - switch protocol { + switch scheme { case "unix": case "http": creds = nil @@ -213,8 +213,8 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts opts = append(opts, grpc.WithDialer(f)) creds := c.creds - if proto, _, scheme := parseEndpoint(endpoint); scheme { - creds = c.processCreds(proto) + if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { + creds = c.processCreds(scheme) } if creds != nil { opts = append(opts, grpc.WithTransportCredentials(*creds)) @@ -248,6 +248,10 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token})) } + // add metrics options + opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) + opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) + conn, err := grpc.Dial(host, opts...) 
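As an aside to the dial path above: parseEndpoint now reports the URL scheme itself rather than a bare bool, so dialSetupOpts can choose transport credentials per scheme ("http" and "unix" drop TLS, for instance). A self-contained approximation of the helper; the real function also normalizes schemes not visible in this hunk:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parseEndpoint(endpoint string) (proto, host, scheme string) {
	proto = "tcp"
	host = endpoint
	u, err := url.Parse(endpoint)
	if err != nil || !strings.Contains(endpoint, "://") {
		return proto, host, "" // no scheme: dial the endpoint as-is over tcp
	}
	scheme = u.Scheme
	// strip the scheme:// prefix, since grpc dials by host
	host = u.Host
	if u.Scheme == "unix" {
		proto = "unix"
	}
	return proto, host, scheme
}

func main() {
	fmt.Println(parseEndpoint("https://10.0.0.1:2379")) // tcp 10.0.0.1:2379 https
	fmt.Println(parseEndpoint("10.0.0.1:2379"))         // tcp 10.0.0.1:2379 (empty scheme)
}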
if err != nil { return nil, err @@ -317,12 +321,6 @@ func newClient(cfg *Config) (*Client, error) { client.Watcher = NewWatcher(client) client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) - if cfg.Logger != nil { - logger.Set(cfg.Logger) - } else { - // disable client side grpc by default - logger.Set(log.New(ioutil.Discard, "", 0)) - } go client.autoSync() return client, nil diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index 8b981171..b9bff626 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -78,7 +78,7 @@ func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []strin // it is safe to retry on update. for { r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r) + resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) if err == nil { return (*MemberUpdateResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index 4f92d7d2..d1d5f409 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -38,9 +38,6 @@ type Config struct { // TLS holds the client secure credentials, if any. TLS *tls.Config - // Logger is the logger used by client library. - Logger Logger - // Username is a username for authentication Username string diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index 9ce84aa5..470ca4dc 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -44,7 +44,7 @@ // etcd client returns 2 types of errors: // // 1. context error: canceled or deadline exceeded. -// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go. +// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go // // Here is the example code to handle client errors: // diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index 834b17d3..c8350f92 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -105,7 +105,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false)) + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) if err != nil { return nil, toErr(ctx, err) } @@ -125,6 +125,7 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { if err == nil { return resp, nil } + if isHaltErr(ctx, err) { return resp, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index ed8bb0a5..d3b587e8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -195,7 +195,7 @@ func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption for { r := toLeaseTimeToLiveRequest(id, opts...) 
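Earlier in this hunk, kv.Do keeps reissuing the request until it either succeeds or hits a halting error (a canceled or expired context, or a definite etcd error); the lease TimeToLive loop continuing below follows the same shape. A reduced model of that retry-until-halt loop, with an illustrative isHalt predicate in place of the unexported isHaltErr:

package main

import (
	"context"
	"errors"
	"fmt"
)

// do reissues op until it succeeds or isHalt says the error is final.
func do(ctx context.Context, op func(context.Context) error, isHalt func(error) bool) error {
	for {
		err := op(ctx)
		if err == nil {
			return nil
		}
		if isHalt(err) {
			return err
		}
		// transient failure: loop and reissue, possibly on another endpoint
	}
}

func main() {
	transient := errors.New("transient")
	attempts := 0
	err := do(context.Background(),
		func(context.Context) error {
			attempts++
			if attempts < 3 {
				return transient
			}
			return nil
		},
		func(err error) bool {
			return err == context.Canceled || err == context.DeadlineExceeded
		})
	fmt.Println(attempts, err) // 3 <nil>
}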
- resp, err := l.remote.LeaseTimeToLive(cctx, r) + resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) if err == nil { gresp := &LeaseTimeToLiveResponse{ ResponseHeader: resp.GetHeader(), diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 6e57c4e7..519db45d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -15,13 +15,15 @@ package clientv3 import ( + "io/ioutil" "log" - "os" "sync" "google.golang.org/grpc/grpclog" ) +// Logger is the logger used by client library. +// It implements grpclog.Logger interface. type Logger grpclog.Logger var ( @@ -34,20 +36,36 @@ type settableLogger struct { } func init() { - // use go's standard logger by default like grpc + // disable client side logs by default logger.mu.Lock() - logger.l = log.New(os.Stderr, "", log.LstdFlags) + logger.l = log.New(ioutil.Discard, "", 0) + + // logger has to override the grpclog at initialization so that + // any changes to the grpclog go through logger with locking + // instead of through SetLogger + // + // now updates only happen through settableLogger.set grpclog.SetLogger(&logger) logger.mu.Unlock() } -func (s *settableLogger) Set(l Logger) { +// SetLogger sets client-side Logger. By default, logs are disabled. +func SetLogger(l Logger) { + logger.set(l) +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger.get() +} + +func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l s.mu.Unlock() } -func (s *settableLogger) Get() Logger { +func (s *settableLogger) get() Logger { s.mu.RLock() l := logger.l s.mu.RUnlock() @@ -56,9 +74,9 @@ func (s *settableLogger) Get() Logger { // implement the grpclog.Logger interface -func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) 
} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 1084c63d..cad8dbfc 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -23,29 +23,32 @@ import ( ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) +type retryRpcFunc func(context.Context, rpcFunc) error func (c *Client) newRetryWrapper() retryRpcFunc { - return func(rpcCtx context.Context, f rpcFunc) { + return func(rpcCtx context.Context, f rpcFunc) error { for { err := f(rpcCtx) if err == nil { - return + return nil } + // only retry if unavailable if grpc.Code(err) != codes.Unavailable { - return + return err } // always stop retry on etcd errors eErr := rpctypes.Error(err) if _, ok := eErr.(rpctypes.EtcdError); ok { - return + return err } + select { case <-c.balancer.ConnectNotify(): case <-rpcCtx.Done(): + return rpcCtx.Err() case <-c.ctx.Done(): - return + return c.ctx.Err() } } } @@ -62,7 +65,7 @@ func RetryKVClient(c *Client) pb.KVClient { } func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - rkv.retryf(ctx, func(rctx context.Context) error { + err = rkv.retryf(ctx, func(rctx context.Context) error { resp, err = rkv.KVClient.Put(rctx, in, opts...) return err }) @@ -70,7 +73,7 @@ func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...gr } func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - rkv.retryf(ctx, func(rctx context.Context) error { + err = rkv.retryf(ctx, func(rctx context.Context) error { resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) return err }) @@ -78,7 +81,7 @@ func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeReq } func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - rkv.retryf(ctx, func(rctx context.Context) error { + err = rkv.retryf(ctx, func(rctx context.Context) error { resp, err = rkv.KVClient.Txn(rctx, in, opts...) return err }) @@ -86,7 +89,7 @@ func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...gr } func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - rkv.retryf(ctx, func(rctx context.Context) error { + err = rkv.retryf(ctx, func(rctx context.Context) error { resp, err = rkv.KVClient.Compact(rctx, in, opts...) return err }) @@ -104,7 +107,7 @@ func RetryLeaseClient(c *Client) pb.LeaseClient { } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - rlc.retryf(ctx, func(rctx context.Context) error { + err = rlc.retryf(ctx, func(rctx context.Context) error { resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) return err }) @@ -113,7 +116,7 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - rlc.retryf(ctx, func(rctx context.Context) error { + err = rlc.retryf(ctx, func(rctx context.Context) error { resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) 
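The signature change at the top of retry.go, retryRpcFunc now returning error, is what lets every wrapper here assign err = retryf(...) instead of silently dropping the outcome; the remaining wrappers below repeat the same one-line change. A reduced, self-contained sketch of the wrapper, with an illustrative retriable predicate and reconnect channel standing in for the gRPC code checks and balancer.ConnectNotify():

package main

import (
	"context"
	"errors"
	"fmt"
)

type rpcFunc func(ctx context.Context) error
type retryRPCFunc func(context.Context, rpcFunc) error

// newRetryWrapper retries f while the error is retriable and a reconnect
// is observed, and otherwise returns the error to the caller.
func newRetryWrapper(retriable func(error) bool, reconnected <-chan struct{}) retryRPCFunc {
	return func(ctx context.Context, f rpcFunc) error {
		for {
			err := f(ctx)
			if err == nil {
				return nil
			}
			if !retriable(err) {
				return err
			}
			select {
			case <-reconnected: // a live endpoint is available again; retry
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
}

func main() {
	unavailable := errors.New("unavailable")
	reconn := make(chan struct{}, 1)
	reconn <- struct{}{}
	calls := 0
	retryf := newRetryWrapper(func(err error) bool { return err == unavailable }, reconn)
	err := retryf(context.Background(), func(context.Context) error {
		calls++
		if calls == 1 {
			return unavailable
		}
		return nil
	})
	fmt.Println(calls, err) // 2 <nil>
}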
return err }) @@ -131,7 +134,7 @@ func RetryClusterClient(c *Client) pb.ClusterClient { } func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - rcc.retryf(ctx, func(rctx context.Context) error { + err = rcc.retryf(ctx, func(rctx context.Context) error { resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) return err }) @@ -139,7 +142,7 @@ func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRe } func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - rcc.retryf(ctx, func(rctx context.Context) error { + err = rcc.retryf(ctx, func(rctx context.Context) error { resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) return err }) @@ -147,7 +150,7 @@ func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRe } func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - rcc.retryf(ctx, func(rctx context.Context) error { + err = rcc.retryf(ctx, func(rctx context.Context) error { resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) return err }) @@ -165,7 +168,7 @@ func RetryAuthClient(c *Client) pb.AuthClient { } func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) return err }) @@ -173,7 +176,7 @@ func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableReq } func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) return err }) @@ -181,7 +184,7 @@ func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableR } func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) return err }) @@ -189,7 +192,7 @@ func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddReque } func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) return err }) @@ -197,7 +200,7 @@ func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDelet } func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) 
return err }) @@ -205,7 +208,7 @@ func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthU } func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) return err }) @@ -213,7 +216,7 @@ func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGr } func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) return err }) @@ -221,7 +224,7 @@ func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserR } func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) return err }) @@ -229,7 +232,7 @@ func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddReque } func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) return err }) @@ -237,7 +240,7 @@ func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDelet } func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) return err }) @@ -245,7 +248,7 @@ func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.Auth } func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - rac.retryf(ctx, func(rctx context.Context) error { + err = rac.retryf(ctx, func(rctx context.Context) error { resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) return err }) diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 28eb491e..2b0e657c 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -92,7 +92,7 @@ func (wr *WatchResponse) Err() error { // IsProgressNotify returns true if the WatchResponse is progress notification. 
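The function shown next tightens the predicate with two extra conditions: a progress notification must carry no compaction revision and a nonzero header revision, so empty creation acks and compaction reports are no longer mistaken for progress updates. The same predicate in isolation, over a hypothetical stand-in struct:

package main

import "fmt"

type watchResponse struct {
	events          int
	canceled        bool
	created         bool
	compactRevision int64
	headerRevision  int64
}

// isProgressNotify mirrors the patched check: an empty response that is not
// a create/cancel ack, reports no compaction, and has a real revision.
func (wr *watchResponse) isProgressNotify() bool {
	return wr.events == 0 && !wr.canceled && !wr.created &&
		wr.compactRevision == 0 && wr.headerRevision != 0
}

func main() {
	fmt.Println((&watchResponse{headerRevision: 5}).isProgressNotify())                // true
	fmt.Println((&watchResponse{created: true, headerRevision: 5}).isProgressNotify()) // false
}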
func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 } // watcher implements the Watcher interface @@ -106,6 +106,7 @@ type watcher struct { streams map[string]*watchGrpcStream } +// watchGrpcStream tracks all watch resources attached to a single grpc stream. type watchGrpcStream struct { owner *watcher remote pb.WatchClient @@ -116,10 +117,10 @@ type watchGrpcStream struct { ctxKey string cancel context.CancelFunc - // mu protects the streams map - mu sync.RWMutex - // streams holds all active watchers - streams map[int64]*watcherStream + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream // reqc sends a watch request from Watch() to the main goroutine reqc chan *watchRequest @@ -134,7 +135,9 @@ type watchGrpcStream struct { // closingc gets the watcherStream of closing watchers closingc chan *watcherStream - // the error that closed the watch stream + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream closeErr error } @@ -162,15 +165,18 @@ type watcherStream struct { initReq watchRequest // outc publishes watch responses to subscriber - outc chan<- WatchResponse + outc chan WatchResponse // recvc buffers watch responses before publishing recvc chan *WatchResponse - id int64 + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. + closing bool + // id is the registered watch id on the grpc stream + id int64 - // lastRev is revision last successfully sent over outc - lastRev int64 - // resumec indicates the stream must recover at a given revision - resumec chan int64 + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse } func NewWatcher(c *Client) Watcher { @@ -198,12 +204,12 @@ func (vc *valCtx) Err() error { return nil } func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { ctx, cancel := context.WithCancel(&valCtx{inctx}) wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), - cancel: cancel, - streams: make(map[int64]*watcherStream), + owner: w, + remote: w.remote, + ctx: ctx, + ctxKey: fmt.Sprintf("%v", inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), respc: make(chan *pb.WatchResponse), reqc: make(chan *watchRequest), @@ -211,6 +217,7 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { donec: make(chan struct{}), errc: make(chan error, 1), closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -220,8 +227,6 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { ow := opWatch(key, opts...) 
- retc := make(chan chan WatchResponse, 1) - var filters []pb.WatchCreateRequest_FilterType if ow.filterPut { filters = append(filters, pb.WatchCreateRequest_NOPUT) @@ -239,7 +244,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch progressNotify: ow.progressNotify, filters: filters, prevKV: ow.prevKV, - retc: retc, + retc: make(chan chan WatchResponse, 1), } ok := false @@ -283,7 +288,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch // receive channel if ok { select { - case ret := <-retc: + case ret := <-wr.retc: return ret case <-ctx.Done(): case <-donec: @@ -314,12 +319,7 @@ func (w *watcher) Close() (err error) { } func (w *watchGrpcStream) Close() (err error) { - w.mu.Lock() - if w.stopc != nil { - close(w.stopc) - w.stopc = nil - } - w.mu.Unlock() + close(w.stopc) <-w.donec select { case err = <-w.errc: @@ -328,71 +328,57 @@ func (w *watchGrpcStream) Close() (err error) { return toErr(w.ctx, err) } -func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) { - if pendingReq == nil { - // no pending request; ignore - return - } - if resp.Canceled || resp.CompactRevision != 0 { - // a cancel at id creation time means the start revision has - // been compacted out of the store - ret := make(chan WatchResponse, 1) - ret <- WatchResponse{ - Header: *resp.Header, - CompactRevision: resp.CompactRevision, - Canceled: true} - close(ret) - pendingReq.retc <- ret - return - } - - ret := make(chan WatchResponse) - if resp.WatchId == -1 { - // failed; no channel - close(ret) - pendingReq.retc <- ret - return - } - - ws := &watcherStream{ - initReq: *pendingReq, - id: resp.WatchId, - outc: ret, - // buffered so unlikely to block on sending while holding mu - recvc: make(chan *WatchResponse, 4), - resumec: make(chan int64), - } - - if pendingReq.rev == 0 { - // note the header revision so that a put following a current watcher - // disconnect will arrive on the watcher channel after reconnect - ws.initReq.rev = resp.Header.Revision - } - +func (w *watcher) closeStream(wgs *watchGrpcStream) { w.mu.Lock() - w.streams[ws.id] = ws + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } w.mu.Unlock() - - // pass back the subscriber channel for the watcher - pendingReq.retc <- ret - - // send messages to subscriber - go w.serveStream(ws) } -func (w *watchGrpcStream) closeStream(ws *watcherStream) bool { - w.mu.Lock() - // cancels request stream; subscriber receives nil channel - close(ws.initReq.retc) - // close subscriber's channel - close(ws.outc) - delete(w.streams, ws.id) - empty := len(w.streams) == 0 - if empty && w.stopc != nil { - w.stopc = nil +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + if resp.WatchId == -1 { + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) + } else { + 
close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } } - w.mu.Unlock() - return empty } // run is the root of the goroutines for managing a watcher client @@ -400,67 +386,81 @@ func (w *watchGrpcStream) run() { var wc pb.Watch_WatchClient var closeErr error - defer func() { - w.owner.mu.Lock() - w.closeErr = closeErr - if w.owner.streams != nil { - delete(w.owner.streams, w.ctxKey) - } - close(w.donec) - w.owner.mu.Unlock() - w.cancel() - }() + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) - // already stopped? - w.mu.RLock() - stopc := w.stopc - w.mu.RUnlock() - if stopc == nil { - return - } + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + + w.owner.closeStream(w) + }() // start a stream with the etcd grpc server if wc, closeErr = w.newWatchClient(); closeErr != nil { return } - var pendingReq, failedReq *watchRequest - curReqC := w.reqc cancelSet := make(map[int64]struct{}) for { select { // Watch() requested - case pendingReq = <-curReqC: - // no more watch requests until there's a response - curReqC = nil - if err := wc.Send(pendingReq.toPB()); err == nil { - // pendingReq now waits on w.respc - break + case wreq := <-w.reqc: + outc := make(chan WatchResponse, 1) + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbufffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + wc.Send(ws.initReq.toPB()) } - failedReq = pendingReq // New events from the watch client case pbresp := <-w.respc: switch { case pbresp.Created: - // response to pending req, try to add - w.addStream(pbresp, pendingReq) - pendingReq = nil - curReqC = w.reqc - w.dispatchEvent(pbresp) + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } case pbresp.Canceled: delete(cancelSet, pbresp.WatchId) - // shutdown serveStream, if any - w.mu.Lock() - if ws, ok := w.streams[pbresp.WatchId]; ok { + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc close(ws.recvc) - delete(w.streams, ws.id) - } - numStreams := len(w.streams) - w.mu.Unlock() - if numStreams == 0 { - // don't leak watcher streams - return + closing[ws] = struct{}{} } default: // dispatch to appropriate watch stream @@ -481,7 +481,6 @@ func (w *watchGrpcStream) run() { wc.Send(req) } // watch client failed to recv; spawn another if possible - // TODO report watch client errors from errc? 
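The reqc and Created cases above introduce the resume-queue discipline: new or resuming watchers are appended to w.resuming, only the head's create request is ever in flight, and abandoned entries are tombstoned with nil rather than spliced out, since the head must wait for its in-flight registration. A small model of just that queue (types and names are illustrative):

package main

import "fmt"

type stream struct{ key string }

type queue struct{ resuming []*stream }

// push queues a watcher; only the head of the queue may send its create
// request immediately.
func (q *queue) push(s *stream) (registerNow bool) {
	q.resuming = append(q.resuming, s)
	return len(q.resuming) == 1
}

// created handles the server's create ack for the head and reports which
// watcher should register next, if any.
func (q *queue) created() *stream {
	q.resuming[0] = nil // head is now registered; tombstone it
	return q.next()
}

// next skips tombstones and returns the next watcher to register.
func (q *queue) next() *stream {
	for len(q.resuming) != 0 {
		if q.resuming[0] != nil {
			return q.resuming[0]
		}
		q.resuming = q.resuming[1:]
	}
	return nil
}

func main() {
	q := &queue{}
	fmt.Println(q.push(&stream{"a"})) // true: "a" registers immediately
	fmt.Println(q.push(&stream{"b"})) // false: "b" waits behind "a"
	fmt.Println(q.created().key)      // b: "a" is registered, "b" sends next
}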
case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -490,43 +489,41 @@ func (w *watchGrpcStream) run() { if wc, closeErr = w.newWatchClient(); closeErr != nil { return } - curReqC = w.reqc - if pendingReq != nil { - failedReq = pendingReq + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) } cancelSet = make(map[int64]struct{}) - case <-stopc: + case <-w.stopc: return case ws := <-w.closingc: - if w.closeStream(ws) { + w.closeSubstream(ws) + delete(closing, ws) + if len(w.substreams)+len(w.resuming) == 0 { + // no more watchers on this stream, shutdown return } } - - // send failed; queue for retry - if failedReq != nil { - go func(wr *watchRequest) { - select { - case w.reqc <- wr: - case <-wr.ctx.Done(): - case <-w.donec: - } - }(pendingReq) - failedReq = nil - pendingReq = nil - } } } +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. +func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - w.mu.RLock() - defer w.mu.RUnlock() - ws, ok := w.streams[pbresp.WatchId] + ws, ok := w.substreams[pbresp.WatchId] if !ok { return false } - events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -538,7 +535,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { Created: pbresp.Created, Canceled: pbresp.Canceled, } - ws.recvc <- wr + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } return true } @@ -561,140 +562,138 @@ func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { } } -// serveStream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveStream(ws *watcherStream) { +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false defer func() { - // signal that this watcherStream is finished - select { - case w.closingc <- ws: - case <-w.donec: - w.closeStream(ws) + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws } }() - var closeErr error emptyWr := &WatchResponse{} - wrs := []*WatchResponse{} - resuming := false - closing := false - for !closing { + for { curWr := emptyWr outc := ws.outc - // ignore created event if create notify is not requested or - // we already sent the initial created event (when we are on the resume path). 
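The rewritten serveSubstream, continuing below, drops the lastRev/resumec machinery in favor of a per-substream buffer (ws.buf) and a running nextRev cursor, set one past the last event's mod revision so a resume never replays delivered events. A reduced model of that buffer-and-forward loop, under the simplifying assumption of a single producer and subscriber:

package main

import "fmt"

type event struct{ modRevision int64 }

type response struct {
	headerRevision int64
	events         []event
}

// forward buffers incoming responses and offers at most one at a time to
// the subscriber, tracking the revision to resume from; it returns that
// revision when recvc closes.
func forward(recvc <-chan *response, outc chan<- *response, initialRev int64) (nextRev int64) {
	nextRev = initialRev
	var buf []*response
	for {
		var cur *response
		var out chan<- *response
		if len(buf) > 0 {
			cur, out = buf[0], outc // offer only when something is buffered
		}
		select {
		case out <- cur:
			buf = buf[1:]
		case wr, ok := <-recvc:
			if !ok {
				return nextRev // shutdown: report where a resume would start
			}
			nextRev = wr.headerRevision
			if n := len(wr.events); n > 0 {
				nextRev = wr.events[n-1].modRevision + 1
			}
			buf = append(buf, wr)
		}
	}
}

func main() {
	recvc := make(chan *response)
	outc := make(chan *response)
	done := make(chan int64)
	go func() { done <- forward(recvc, outc, 0) }()
	recvc <- &response{headerRevision: 7, events: []event{{modRevision: 7}}}
	fmt.Println((<-outc).headerRevision) // 7: delivered from the buffer
	close(recvc)
	fmt.Println(<-done) // 8: one past the last event's mod revision
}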
- if len(wrs) > 0 && wrs[0].Created && - (!ws.initReq.createdNotify || ws.lastRev != 0) { - wrs = wrs[1:] - } - - if len(wrs) > 0 { - curWr = wrs[0] + if len(ws.buf) > 0 { + curWr = ws.buf[0] } else { outc = nil } select { case outc <- *curWr: - if wrs[0].Err() != nil { - closing = true - break - } - var newRev int64 - if len(wrs[0].Events) > 0 { - newRev = wrs[0].Events[len(wrs[0].Events)-1].Kv.ModRevision - } else { - newRev = wrs[0].Header.Revision - } - if newRev != ws.lastRev { - ws.lastRev = newRev - } - wrs[0] = nil - wrs = wrs[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeStream + if ws.buf[0].Err() != nil { return } - // resume up to last seen event if disconnected - if resuming && wr.Err() == nil { - resuming = false - // trim events already seen - for i := 0; i < len(wr.Events); i++ { - if wr.Events[i].Kv.ModRevision > ws.lastRev { - wr.Events = wr.Events[i:] - break + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr } } - // only forward new events - if wr.Events[0].Kv.ModRevision == ws.lastRev { - break - } } - resuming = false - // TODO don't keep buffering if subscriber stops reading - wrs = append(wrs, wr) - case resumeRev := <-ws.resumec: - wrs = nil - resuming = true - if resumeRev == -1 { - // pause serving stream while resume gets set up - break + + nextRev = wr.Header.Revision + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } - if resumeRev != ws.lastRev { - panic("unexpected resume revision") + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue } - case <-w.donec: - closing = true - closeErr = w.closeErr + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-ws.initReq.ctx.Done(): - closing = true + return + case <-resumec: + resuming = true + return } } - - // try to send off close error - if closeErr != nil { - select { - case ws.outc <- WatchResponse{closeErr: w.closeErr}: - case <-w.donec: - case <-time.After(closeSendErrTimeout): - } - } - // lazily send cancel message if events on missing id } func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - ws, rerr := w.resume() - if rerr != nil { - return nil, rerr + // connect to grpc stream + wc, err := w.openWatchClient() + if err != nil { + return nil, v3rpc.Error(err) } - go w.serveWatchClient(ws) - return ws, nil -} - -// resume creates a new WatchClient with all current watchers reestablished -func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) { - for { - if ws, err = w.openWatchClient(); err != nil { - break - } else if err = w.resumeWatchers(ws); err == nil { - break + // mark all substreams as resuming + if len(w.substreams)+len(w.resuming) > 0 { + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + for _, ws := range w.resuming { + if ws == nil || ws.closing { + continue + } + ws.donec = make(chan struct{}) + go w.serveSubstream(ws, w.resumec) + } + } + w.substreams = 
make(map[int64]*watcherStream) + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +// joinSubstream waits for all substream goroutines to complete +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec } } - return ws, v3rpc.Error(err) } // openWatchClient retries opening a watchclient until retryConnection fails func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { - w.mu.Lock() - stopc := w.stopc - w.mu.Unlock() - if stopc == nil { + select { + case <-w.stopc: if err == nil { - err = context.Canceled + return nil, context.Canceled } return nil, err + default: } if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { break @@ -706,63 +705,6 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// resumeWatchers rebuilds every registered watcher on a new client -func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error { - w.mu.RLock() - streams := make([]*watcherStream, 0, len(w.streams)) - for _, ws := range w.streams { - streams = append(streams, ws) - } - w.mu.RUnlock() - - for _, ws := range streams { - // drain recvc so no old WatchResponses (e.g., Created messages) - // are processed while resuming - ws.drain() - - // pause serveStream - ws.resumec <- -1 - - // reconstruct watcher from initial request - if ws.lastRev != 0 { - ws.initReq.rev = ws.lastRev - } - if err := wc.Send(ws.initReq.toPB()); err != nil { - return err - } - - // wait for request ack - resp, err := wc.Recv() - if err != nil { - return err - } else if len(resp.Events) != 0 || !resp.Created { - return fmt.Errorf("watcher: unexpected response (%+v)", resp) - } - - // id may be different since new remote watcher; update map - w.mu.Lock() - delete(w.streams, ws.id) - ws.id = resp.WatchId - w.streams[ws.id] = ws - w.mu.Unlock() - - // unpause serveStream - ws.resumec <- ws.lastRev - } - return nil -} - -// drain removes all buffered WatchResponses from the stream's receive channel. -func (ws *watcherStream) drain() { - for { - select { - case <-ws.recvc: - default: - return - } - } -} - // toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index 81008b4c..a6985527 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -104,9 +104,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
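One more pattern from earlier in this hunk, before the regenerated descriptor bytes below: openWatchClient replaces the old nil-stopc test with a non-blocking select/default probe of the stop channel ahead of each dial attempt, returning context.Canceled when stopped before any attempt failed. A reduced model of that loop (openWithRetry and dial are illustrative names; the isHaltErr and v3rpc error mapping are elided):

package main

import (
	"context"
	"fmt"
)

func openWithRetry(stopc <-chan struct{}, dial func() error) error {
	var err error
	for {
		// probe the stop channel without blocking
		select {
		case <-stopc:
			if err == nil {
				return context.Canceled
			}
			return err
		default:
		}
		if err = dial(); err == nil {
			return nil
		}
		// halting-error checks elided; otherwise loop and redial
	}
}

func main() {
	stopc := make(chan struct{})
	close(stopc)
	err := openWithRetry(stopc, func() error { return nil })
	fmt.Println(err == context.Canceled) // true: stopped before any dial
}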
var _ = proto.Marshal @@ -1013,30 +1013,31 @@ var ( ) var fileDescriptorEtcdserver = []byte{ - // 396 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x4f, 0xae, 0xd3, 0x30, - 0x10, 0xc6, 0x9b, 0xc4, 0x4d, 0x1b, 0xf3, 0x80, 0x87, 0x55, 0xa1, 0xd1, 0x13, 0x0a, 0xa8, 0x62, - 0xc1, 0x0a, 0xee, 0xf0, 0x68, 0x17, 0x95, 0x00, 0x95, 0x16, 0xc1, 0xda, 0x24, 0x43, 0x6b, 0xa9, - 0x89, 0x53, 0xdb, 0x09, 0xbd, 0x01, 0x57, 0xe0, 0x48, 0x5d, 0x72, 0x02, 0xc4, 0x9f, 0x8b, 0x60, - 0xbb, 0x4d, 0x31, 0x2c, 0x2c, 0x45, 0xbf, 0xef, 0x9b, 0xf1, 0xe7, 0x99, 0xd0, 0x6b, 0x34, 0x45, - 0xa9, 0x51, 0x75, 0xa8, 0x9e, 0x37, 0x4a, 0x1a, 0xc9, 0xae, 0xfe, 0x92, 0xe6, 0xe3, 0xcd, 0x64, - 0x23, 0x37, 0xd2, 0x0b, 0x2f, 0xdc, 0xd7, 0xc9, 0x33, 0xfd, 0x42, 0xe8, 0x68, 0x85, 0xfb, 0x16, - 0xb5, 0x61, 0x13, 0x1a, 0x2f, 0x66, 0x10, 0x3d, 0x89, 0x9e, 0x91, 0x5b, 0x72, 0xfc, 0xfe, 0x78, - 0xb0, 0x8a, 0xc5, 0x8c, 0x3d, 0xa2, 0xe9, 0x6b, 0x34, 0x5b, 0x59, 0x42, 0x6c, 0x95, 0xec, 0xac, - 0xa4, 0x95, 0x67, 0x0c, 0x28, 0x59, 0x72, 0xb3, 0x85, 0x24, 0xd0, 0x48, 0x63, 0x09, 0x7b, 0x48, - 0x93, 0xf7, 0x7c, 0x07, 0x24, 0x10, 0x92, 0x8e, 0xef, 0x1c, 0x9f, 0x09, 0x05, 0x43, 0xcb, 0xc7, - 0x3d, 0x2f, 0x85, 0x62, 0x53, 0x9a, 0x2d, 0x15, 0x76, 0xb6, 0xa6, 0x45, 0x48, 0x83, 0xaa, 0xac, - 0xe9, 0x71, 0xef, 0x59, 0xd4, 0x25, 0x1e, 0x60, 0x14, 0x04, 0xf5, 0x1e, 0x8f, 0x7b, 0xcf, 0xfc, - 0x20, 0xb4, 0x81, 0xf1, 0xe5, 0x96, 0xe8, 0xe4, 0xf1, 0x98, 0x3d, 0xa5, 0x74, 0x7e, 0x68, 0x84, - 0xe2, 0x46, 0xc8, 0x1a, 0x32, 0x6b, 0x4a, 0xce, 0x8d, 0x28, 0x5e, 0xb8, 0x7b, 0xdb, 0x07, 0x2e, - 0x0c, 0xd0, 0x20, 0x2a, 0xf9, 0x6c, 0x09, 0xbb, 0xa1, 0xc3, 0xb5, 0xa8, 0x0b, 0x84, 0x3b, 0x41, - 0x86, 0xa1, 0x76, 0xc8, 0xdd, 0xbf, 0xc2, 0xa2, 0x55, 0x5a, 0x74, 0x08, 0x57, 0x41, 0x69, 0xa6, - 0x7a, 0xec, 0x66, 0xba, 0x96, 0xca, 0x60, 0x09, 0x77, 0x03, 0x43, 0xaa, 0x3d, 0x73, 0xea, 0xdb, - 0x56, 0xaa, 0xb6, 0x82, 0x7b, 0xa1, 0xba, 0xf7, 0xcc, 0xa5, 0x7a, 0x27, 0x2a, 0x84, 0xfb, 0x41, - 0x6a, 0x62, 0x2c, 0xf1, 0x5d, 0x8d, 0x42, 0x5e, 0xc1, 0xf5, 0x3f, 0x5d, 0x3d, 0x63, 0xb9, 0x5b, - 0xf4, 0x27, 0x85, 0x7a, 0x0b, 0x0f, 0x82, 0xa9, 0x8c, 0xd4, 0x09, 0x4e, 0x5f, 0xd1, 0xb1, 0xdd, - 0x33, 0x2f, 0xb9, 0xe1, 0xae, 0xd3, 0x1b, 0x59, 0xe2, 0x7f, 0x7f, 0x43, 0x5a, 0x7b, 0xe6, 0x5e, - 0xf8, 0x72, 0xd7, 0x6a, 0x83, 0xca, 0x1a, 0xe2, 0x70, 0x0b, 0x45, 0x8f, 0x6f, 0x27, 0xc7, 0x9f, - 0xf9, 0xe0, 0xf8, 0x2b, 0x8f, 0xbe, 0xd9, 0xf3, 0xc3, 0x9e, 0xaf, 0xbf, 0xf3, 0xc1, 0x9f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40, 0xa4, 0x02, 0x00, 0x00, + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0x92, 0x41, 0x6e, 0x13, 0x31, + 0x14, 0x86, 0xe3, 0xc4, 0x99, 0x64, 0x4c, 0x81, 0x62, 0x45, 0xe8, 0xa9, 0x42, 0x43, 0x14, 0xb1, + 0xc8, 0x0a, 0xee, 0x50, 0xd2, 0x45, 0x24, 0x8a, 0x4a, 0x8a, 0xca, 0xda, 0x64, 0x1e, 0x8d, 0xa5, + 0xcc, 0x78, 0x6a, 0xbf, 0x19, 0x72, 0x03, 0xae, 0xc0, 0x91, 0xb2, 0xe4, 0x04, 0x08, 0xc2, 0x45, + 0x90, 0x3d, 0x9d, 0x60, 0xba, 0xb3, 0xbe, 0xff, 0xf7, 0xef, 0xdf, 0xf6, 0x13, 0xa7, 0x48, 0xeb, + 0xdc, 0xa1, 0x6d, 0xd0, 0xbe, 0xae, 0xac, 0x21, 0x23, 0x4f, 0xfe, 0x91, 0xea, 0xf3, 0xd9, 0xe4, + 0xd6, 0xdc, 0x9a, 0x20, 0xbc, 0xf1, 0xab, 0xd6, 0x33, 0xfb, 0xc6, 0xc5, 0x68, 0x85, 0x77, 0x35, + 0x3a, 0x92, 0x13, 0xd1, 0x5f, 0x2e, 0x80, 0x4d, 0xd9, 0x9c, 0x9f, 0xf3, 0xfd, 0xcf, 0x97, 0xbd, + 0x55, 0x5f, 0x2f, 0xe4, 0x0b, 0x91, 0x5c, 0x22, 0x6d, 0x4c, 0x0e, 0xfd, 0x29, 0x9b, 0xa7, 0xf7, + 0x4a, 0x52, 0x04, 0x26, 0x41, 0xf0, 0x2b, 0x45, 0x1b, 
0x18, 0x44, 0x1a, 0xaf, 0x14, 0x6d, 0xe4, + 0x73, 0x31, 0xb8, 0x51, 0x5b, 0xe0, 0x91, 0x30, 0x68, 0xd4, 0xd6, 0xf3, 0x85, 0xb6, 0x30, 0x9c, + 0xb2, 0xf9, 0xb8, 0xe3, 0xb9, 0xb6, 0x72, 0x26, 0xd2, 0x2b, 0x8b, 0xcd, 0x8d, 0xda, 0xd6, 0x08, + 0x49, 0xb4, 0x2b, 0xad, 0x3a, 0xdc, 0x79, 0x96, 0x65, 0x8e, 0x3b, 0x18, 0x45, 0x45, 0x83, 0x27, + 0xe0, 0xce, 0x73, 0xb1, 0xd3, 0x8e, 0x60, 0x7c, 0x3c, 0x85, 0xb5, 0x9e, 0x80, 0xe5, 0x2b, 0x21, + 0x2e, 0x76, 0x95, 0xb6, 0x8a, 0xb4, 0x29, 0x21, 0x9d, 0xb2, 0xf9, 0xe0, 0x3e, 0x48, 0xe0, 0x91, + 0xfb, 0xbb, 0x7d, 0x52, 0x9a, 0x40, 0x44, 0x55, 0xf9, 0x57, 0xa5, 0x49, 0x9e, 0x89, 0xe1, 0xb5, + 0x2e, 0xd7, 0x08, 0x8f, 0xa2, 0x0e, 0x43, 0xe7, 0x91, 0x3f, 0x7f, 0x85, 0xeb, 0xda, 0x3a, 0xdd, + 0x20, 0x9c, 0x44, 0x5b, 0x53, 0xdb, 0x61, 0xff, 0xa6, 0xd7, 0xc6, 0x12, 0xe6, 0xf0, 0x38, 0x32, + 0x24, 0x2e, 0x30, 0xaf, 0x7e, 0xa8, 0x8d, 0xad, 0x0b, 0x78, 0x12, 0xab, 0x77, 0x81, 0xf9, 0x56, + 0x1f, 0x75, 0x81, 0xf0, 0x34, 0x6a, 0xcd, 0x49, 0x17, 0x6d, 0x2a, 0x59, 0x54, 0x05, 0x9c, 0xfe, + 0x97, 0x1a, 0x98, 0xcc, 0xfc, 0x47, 0x7f, 0xb1, 0xe8, 0x36, 0xf0, 0x2c, 0x7a, 0x95, 0x91, 0x6d, + 0xe1, 0xec, 0x9d, 0x18, 0x5f, 0x22, 0xa9, 0x5c, 0x91, 0xf2, 0x49, 0xef, 0x4d, 0x8e, 0x0f, 0xa6, + 0x21, 0x29, 0x03, 0xf3, 0x37, 0x7c, 0xbb, 0xad, 0x1d, 0xa1, 0x5d, 0x2e, 0xc2, 0x50, 0x1c, 0x7f, + 0x61, 0xdd, 0xe1, 0xf3, 0xc9, 0xfe, 0x77, 0xd6, 0xdb, 0x1f, 0x32, 0xf6, 0xe3, 0x90, 0xb1, 0x5f, + 0x87, 0x8c, 0x7d, 0xff, 0x93, 0xf5, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x80, 0x62, 0xfc, 0x40, + 0xa4, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 84c114ff..6b3c71e1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -10,9 +10,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -2035,57 +2035,58 @@ var ( ) var fileDescriptorRaftInternal = []byte{ - // 831 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x56, 0xdb, 0x4e, 0xdb, 0x4a, - 0x14, 0x25, 0xe1, 0x9a, 0x49, 0xc8, 0xe1, 0x0c, 0x70, 0xce, 0x9c, 0x20, 0x71, 0x20, 0xa8, 0xf7, - 0x96, 0x56, 0xf0, 0x01, 0x6d, 0x4a, 0x10, 0x20, 0xa1, 0x0a, 0x59, 0x54, 0xaa, 0xd4, 0x07, 0x77, - 0x88, 0xa7, 0xc1, 0xad, 0x63, 0xbb, 0xf6, 0x84, 0xd2, 0x3f, 0xe9, 0x67, 0xf4, 0xf6, 0x11, 0x3c, - 0xf4, 0x42, 0xdb, 0x1f, 0xe8, 0xe5, 0xa5, 0x4f, 0x7d, 0x69, 0x3f, 0xa0, 0x73, 0xf3, 0xd8, 0x4e, - 0xc6, 0x3c, 0x44, 0xb2, 0xd7, 0xac, 0xbd, 0xd6, 0x1e, 0xef, 0x35, 0x0c, 0x60, 0x36, 0xc2, 0x0f, - 0xa9, 0xed, 0xfa, 0x94, 0x44, 0x3e, 0xf6, 0x56, 0xc3, 0x28, 0xa0, 0x01, 0xac, 0x11, 0xda, 0x71, - 0x62, 0x12, 0x1d, 0x91, 0x28, 0x3c, 0x68, 0xcc, 0x75, 0x83, 0x6e, 0x20, 0x16, 0xae, 0xf3, 0x27, - 0xc9, 0x69, 0xcc, 0xa4, 0x1c, 0x85, 0x54, 0xa2, 0xb0, 0x23, 0x1f, 0x9b, 0x0f, 0xc0, 0xb4, 0x45, - 0x9e, 0xf4, 0x49, 0x4c, 0xb7, 0x09, 0x76, 0x48, 0x04, 0xeb, 0xa0, 0xbc, 0xd3, 0x46, 0xa5, 0xa5, - 0xd2, 0xc5, 0x31, 0xab, 0xec, 0xb6, 0x61, 0x03, 0x4c, 0xf5, 0x63, 0x6e, 0xd9, 0x23, 0xa8, 0xcc, - 0xd0, 0x8a, 0xa5, 0xdf, 0xe1, 0x0a, 0x98, 0xc6, 0x7d, 0x7a, 0x68, 0x47, 0xe4, 0xc8, 0x8d, 0xdd, - 0xc0, 0x47, 0xa3, 0xa2, 0xac, 0xc6, 0x41, 0x4b, 0x61, 0xcd, 0x9f, 0x75, 0x30, 0xbb, 0xa3, 0xba, - 0xb6, 0xd8, 0x16, 0x94, 0xdd, 0x90, 0xd1, 0x39, 0x50, 0x3e, 0x5a, 0x13, 0x16, 0xd5, 0xb5, 0xf9, - 0xd5, 0xec, 0xbe, 0x56, 0x55, 0x89, 0xc5, 0x08, 0xf0, 0x06, 0x18, 0x8f, 0xb0, 0xdf, 0x25, 0xc2, - 0xab, 0xba, 0xd6, 0x18, 0x60, 0xf2, 0xa5, 0x84, 0x2e, 0x89, 0xf0, 0x32, 0x18, 0x0d, 0xfb, 0x14, - 0x8d, 0x09, 0x3e, 0xca, 0xf3, 0xf7, 0xfa, 0x49, 0x3f, 0x16, 0x27, 0xc1, 0x0d, 0x50, 0x73, 0x88, - 0x47, 0x28, 0xb1, 0xa5, 0xc9, 0xb8, 0x28, 0x5a, 0xca, 0x17, 0xb5, 0x05, 0x23, 0x67, 0x55, 0x75, - 0x52, 0x8c, 0x1b, 0xd2, 0x63, 0x1f, 0x4d, 0x98, 0x0c, 0xf7, 0x8f, 0x7d, 0x6d, 0xc8, 0x48, 0xf0, - 0x26, 0x00, 0x9d, 0xa0, 0x17, 0xe2, 0x0e, 0xe5, 0xdf, 0x6f, 0x52, 0x94, 0xfc, 0x9f, 0x2f, 0xd9, - 0xd0, 0xeb, 0x49, 0x65, 0xa6, 0x04, 0xde, 0x02, 0x55, 0x8f, 0xe0, 0x98, 0xd8, 0x5d, 0xd6, 0x31, - 0x45, 0x53, 0x26, 0x85, 0x5d, 0x4e, 0xd8, 0xe2, 0xeb, 0x5a, 0xc1, 0xd3, 0x10, 0xdf, 0xb3, 0x54, - 0x60, 0x63, 0x0c, 0x1e, 0x13, 0x54, 0x31, 0xed, 0x59, 0x48, 0x58, 0x82, 0xa0, 0xf7, 0xec, 0xa5, - 0x18, 0x1f, 0x0b, 0xf6, 0x70, 0xd4, 0x43, 0xc0, 0x34, 0x96, 0x16, 0x5f, 0xd2, 0x63, 0x11, 0x44, - 0xb8, 0x0e, 0x26, 0x0e, 0x45, 0xe4, 0x90, 0x23, 0x4a, 0x16, 0x8c, 0x33, 0x97, 0xa9, 0xb4, 0x14, - 0x15, 0xb6, 0x40, 0x55, 0x24, 0x8e, 0xf8, 0xf8, 0xc0, 0x23, 0xe8, 0x87, 0xf1, 0x83, 0xb5, 0x18, - 0x63, 0x53, 0x10, 0xf4, 0x76, 0xb1, 0x86, 0x60, 0x1b, 0x88, 0x7c, 0xda, 0x8e, 0x1b, 0x0b, 0x8d, - 0x5f, 0x93, 0xa6, 0xfd, 0x72, 0x8d, 0xb6, 0x64, 0xe8, 0xfd, 0xe2, 0x14, 0x83, 0x77, 0xa4, 0x0a, - 0xf1, 0xa9, 0xdb, 0xc1, 0x94, 0xa0, 0xdf, 0x52, 0xe5, 0x52, 0x5e, 0x25, 0xc9, 0x7d, 0x2b, 0x43, - 0x4d, 0xe4, 0x72, 0xf5, 0x70, 0x53, 0x1d, 0x25, 0x7e, 0xb6, 0x6c, 0xec, 0x38, 0xe8, 0xed, 0x54, - 0x51, 0x5b, 0x77, 0xd9, 0x5b, 0xcb, 0x71, 0x72, 0x6d, 0x29, 0x8c, 0xb5, 0x35, 0x93, 0xca, 0xc8, - 0x4c, 0xa2, 0x77, 0x52, 0x69, 0xc5, 0xac, 0xa4, 0xc2, 0xac, 0xc4, 0xea, 0x38, 0x07, 0xe7, 0xdb, - 0xea, 0x12, 0x8a, 0xde, 0x9f, 0xd9, 0xd6, 0x16, 0xa1, 0x43, 0x6d, 0x31, 0x0c, 0x76, 0xc1, 0x7f, - 0xa9, 0x4c, 0xe7, 0x90, 0x9f, 0x12, 0x3b, 0xc4, 0x71, 0xfc, 0x34, 0x88, 0x1c, 0xf4, 0x41, 0x4a, - 0x5e, 0x31, 0x4b, 0x6e, 0x08, 0xf6, 0x9e, 0x22, 0x27, 0xea, 0xff, 0x60, 0xe3, 
0x32, 0xbc, 0x07, - 0xe6, 0x32, 0xfd, 0xf2, 0x78, 0xdb, 0x51, 0xc0, 0x86, 0x7c, 0x2a, 0x3d, 0xce, 0x17, 0xb4, 0x2d, - 0x8e, 0x46, 0x90, 0x8e, 0xfa, 0x6f, 0x3c, 0xb8, 0x02, 0xef, 0x83, 0xf9, 0x54, 0x59, 0x9e, 0x14, - 0x29, 0xfd, 0x51, 0x4a, 0x5f, 0x30, 0x4b, 0xab, 0x23, 0x93, 0xd1, 0x86, 0x78, 0x68, 0x09, 0x6e, - 0x83, 0x7a, 0x2a, 0xee, 0xb9, 0x31, 0x45, 0x9f, 0xa4, 0xea, 0xb2, 0x59, 0x75, 0x97, 0x51, 0x72, - 0x39, 0x4a, 0x40, 0xad, 0xc4, 0x5b, 0x93, 0x4a, 0x9f, 0x0b, 0x95, 0xb8, 0xf5, 0x90, 0x52, 0x02, - 0xea, 0xd1, 0x0b, 0x25, 0x9e, 0xc8, 0x17, 0x95, 0xa2, 0xd1, 0xf3, 0x9a, 0xc1, 0x44, 0x2a, 0x4c, - 0x27, 0x52, 0xc8, 0xa8, 0x44, 0xbe, 0xac, 0x14, 0x25, 0x92, 0x57, 0x19, 0x12, 0x99, 0xc2, 0xf9, - 0xb6, 0x78, 0x22, 0x5f, 0x9d, 0xd9, 0xd6, 0x60, 0x22, 0x15, 0x06, 0x1f, 0x81, 0x46, 0x46, 0x46, - 0x04, 0x25, 0x24, 0x51, 0xcf, 0x8d, 0xc5, 0x3d, 0xf6, 0x5a, 0x6a, 0x5e, 0x2d, 0xd0, 0xe4, 0xf4, - 0x3d, 0xcd, 0x4e, 0xf4, 0xff, 0xc5, 0xe6, 0x75, 0xd8, 0x03, 0x0b, 0xa9, 0x97, 0x8a, 0x4e, 0xc6, - 0xec, 0x8d, 0x34, 0xbb, 0x66, 0x36, 0x93, 0x29, 0x19, 0x76, 0x43, 0xb8, 0x80, 0xd0, 0xfc, 0x0b, - 0x4c, 0x6f, 0xf6, 0x42, 0xfa, 0xcc, 0x22, 0x71, 0x18, 0xf8, 0x31, 0x69, 0x86, 0x60, 0xe1, 0x8c, - 0x3f, 0x44, 0x10, 0x82, 0x31, 0x71, 0xbb, 0x97, 0xc4, 0xed, 0x2e, 0x9e, 0xf9, 0xad, 0xaf, 0xcf, - 0xa7, 0xba, 0xf5, 0x93, 0x77, 0xb8, 0x0c, 0x6a, 0xb1, 0xdb, 0x0b, 0xd9, 0x5e, 0x28, 0x33, 0x96, - 0x97, 0x7e, 0xc5, 0xaa, 0x4a, 0x6c, 0x9f, 0x43, 0xb7, 0xe7, 0x4e, 0xbe, 0x2e, 0x8e, 0x9c, 0x7c, - 0x5b, 0x2c, 0x9d, 0xb2, 0xdf, 0x17, 0xf6, 0x7b, 0xfe, 0x7d, 0x71, 0xe4, 0x60, 0x42, 0xfc, 0xcb, - 0xb1, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x02, 0x23, 0xd2, 0x00, 0xca, 0x08, 0x00, 0x00, + // 837 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, + 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, + 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, + 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, + 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, + 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, + 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, + 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, + 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, + 0xd6, 0xae, 0xe3, 0x12, 0x9a, 0xea, 0x04, 0xa2, 0x65, 0x1b, 0x48, 0x76, 0x31, 0xb3, 0x92, 0x33, + 0xa2, 0x77, 0xbc, 0x8c, 0xa6, 0x69, 0x87, 0xb7, 0x4c, 0x06, 0x5d, 0x3b, 0xb0, 0x3d, 0x97, 0x8c, + 0xca, 0xb2, 0x82, 0x08, 0x1a, 0x3a, 0x56, 0xf9, 0x53, 0x44, 0xb3, 0xdb, 0x7a, 0x6a, 0x83, 0x1e, + 0x72, 0xdd, 0x6e, 0xa0, 0xd1, 0x35, 0x94, 0xed, 0x56, 0x65, 0x8b, 0x7c, 0x75, 0x7e, 0xb5, 0x77, + 0x5d, 0xab, 0xba, 0xc4, 0xc8, 0x76, 0xab, 0xf8, 0x3e, 0x1a, 0x67, 0xd4, 0x6d, 0x82, 0xec, 0x95, + 0xaf, 0x96, 0xfa, 0x94, 0x22, 0x15, 0xca, 0x95, 0x10, 0xdf, 0x42, 0xa3, 0x7e, 0x87, 0x93, 0x31, + 0xa9, 0x27, 0x49, 0xfd, 0x5e, 0x27, 0x9c, 0xc7, 0x10, 0x22, 0xbc, 0x8e, 0x0a, 0x16, 0x38, 0xc0, + 0xc1, 0x54, 0x4d, 0xc6, 0x65, 0xd1, 0x62, 0xb2, 0xa8, 0x2e, 0x15, 0x89, 0x56, 0x79, 0x2b, 0x8e, + 0x89, 0x86, 0xfc, 0xd8, 0x25, 0x13, 0x69, 0x0d, 0xf7, 0x8f, 
0xdd, 0xa8, 0x21, 0x3f, 0x76, 0xf1, + 0x03, 0x84, 0x1a, 0x5e, 0xdb, 0xa7, 0x0d, 0x2e, 0xbe, 0xdf, 0xa4, 0x2c, 0xb9, 0x9a, 0x2c, 0x59, + 0x8f, 0xf2, 0x61, 0x65, 0x4f, 0x09, 0x7e, 0x88, 0xf2, 0x0e, 0xd0, 0x00, 0xcc, 0x26, 0xa3, 0x2e, + 0x27, 0x53, 0x69, 0x84, 0x1d, 0x21, 0xd8, 0x14, 0xf9, 0x88, 0xe0, 0x44, 0x21, 0xb1, 0x66, 0x45, + 0x60, 0xd0, 0xf5, 0x8e, 0x80, 0xe4, 0xd2, 0xd6, 0x2c, 0x11, 0x86, 0x14, 0x44, 0x6b, 0x76, 0xe2, + 0x98, 0xd8, 0x16, 0xea, 0x50, 0xd6, 0x26, 0x28, 0x6d, 0x5b, 0x6a, 0x22, 0x15, 0x6d, 0x8b, 0x14, + 0xe2, 0x35, 0x34, 0xd1, 0x92, 0x96, 0x23, 0x96, 0x2c, 0x59, 0x48, 0xdd, 0x73, 0xe5, 0x4a, 0x43, + 0x4b, 0x71, 0x0d, 0xe5, 0xa5, 0xe3, 0xc0, 0xa5, 0x07, 0x0e, 0x90, 0xdf, 0xa9, 0x1f, 0xac, 0xd6, + 0xe1, 0xad, 0x0d, 0x29, 0x88, 0x96, 0x4b, 0xa3, 0x10, 0xae, 0x23, 0xe9, 0x4f, 0xd3, 0xb2, 0x03, + 0xc9, 0xf8, 0x3b, 0x99, 0xb6, 0x5e, 0xc1, 0xa8, 0x2b, 0x45, 0xb4, 0x5e, 0x1a, 0xc7, 0xf0, 0xae, + 0xa2, 0x80, 0xcb, 0xed, 0x06, 0xe5, 0x40, 0xfe, 0x29, 0xca, 0xcd, 0x24, 0x25, 0xf4, 0x7d, 0xad, + 0x47, 0x1a, 0xe2, 0x12, 0xf5, 0x78, 0x43, 0x1f, 0x25, 0x71, 0xb6, 0x4c, 0x6a, 0x59, 0xe4, 0xe3, + 0xd4, 0xb0, 0xb1, 0x1e, 0x07, 0xc0, 0x6a, 0x96, 0x95, 0x18, 0x4b, 0xc7, 0xf0, 0x2e, 0x9a, 0x89, + 0x31, 0xca, 0x93, 0xe4, 0x93, 0x22, 0x2d, 0xa7, 0x93, 0xb4, 0x99, 0x35, 0xac, 0x48, 0x13, 0xe1, + 0xe4, 0x58, 0x4d, 0xe0, 0xe4, 0xf3, 0xb9, 0x63, 0x6d, 0x02, 0x1f, 0x18, 0x6b, 0x13, 0x38, 0x6e, + 0xa2, 0x2b, 0x31, 0xa6, 0xd1, 0x12, 0xa7, 0xc4, 0xf4, 0x69, 0x10, 0xbc, 0xf4, 0x98, 0x45, 0xbe, + 0x28, 0xe4, 0xed, 0x74, 0xe4, 0xba, 0x54, 0xef, 0x69, 0x71, 0x48, 0xbf, 0x44, 0x53, 0xd3, 0xf8, + 0x09, 0x9a, 0xeb, 0x99, 0x57, 0xd8, 0xdb, 0x64, 0x9e, 0x03, 0xe4, 0x54, 0xf5, 0xb8, 0x3e, 0x64, + 0x6c, 0x79, 0x34, 0xbc, 0x78, 0xab, 0x2f, 0xd2, 0xfe, 0x0c, 0x7e, 0x8a, 0xe6, 0x63, 0xb2, 0x3a, + 0x29, 0x0a, 0xfd, 0x55, 0xa1, 0x6f, 0xa4, 0xa3, 0xf5, 0x91, 0xe9, 0x61, 0x63, 0x3a, 0x90, 0xc2, + 0x5b, 0xa8, 0x18, 0xc3, 0x1d, 0x3b, 0xe0, 0xe4, 0x9b, 0xa2, 0x2e, 0xa5, 0x53, 0x77, 0xec, 0x80, + 0x27, 0x7c, 0x14, 0x06, 0x23, 0x92, 0x18, 0x4d, 0x91, 0xbe, 0x0f, 0x25, 0x89, 0xd6, 0x03, 0xa4, + 0x30, 0x18, 0x6d, 0xbd, 0x24, 0x09, 0x47, 0xbe, 0xc9, 0x0d, 0xdb, 0x7a, 0x51, 0xd3, 0xef, 0x48, + 0x1d, 0x8b, 0x1c, 0x29, 0x31, 0xda, 0x91, 0x6f, 0x73, 0xc3, 0x1c, 0x29, 0xaa, 0x52, 0x1c, 0x19, + 0x87, 0x93, 0x63, 0x09, 0x47, 0xbe, 0x3b, 0x77, 0xac, 0x7e, 0x47, 0xea, 0x18, 0x7e, 0x8e, 0x4a, + 0x3d, 0x18, 0x69, 0x14, 0x1f, 0x58, 0xdb, 0x0e, 0xe4, 0x3d, 0xf6, 0x5e, 0x31, 0xef, 0x0c, 0x61, + 0x0a, 0xf9, 0x5e, 0xa4, 0x0e, 0xf9, 0x97, 0x69, 0x7a, 0x1e, 0xb7, 0xd1, 0x42, 0xdc, 0x4b, 0x5b, + 0xa7, 0xa7, 0xd9, 0x07, 0xd5, 0xec, 0x6e, 0x7a, 0x33, 0xe5, 0x92, 0xc1, 0x6e, 0x84, 0x0e, 0x11, + 0x54, 0x2e, 0xa0, 0xe9, 0x8d, 0xb6, 0xcf, 0x5f, 0x19, 0x10, 0xf8, 0x9e, 0x1b, 0x40, 0xc5, 0x47, + 0x0b, 0xe7, 0xfc, 0x10, 0x61, 0x8c, 0xc6, 0xe4, 0xed, 0x9e, 0x91, 0xb7, 0xbb, 0x7c, 0x16, 0xb7, + 0x7e, 0x74, 0x3e, 0xf5, 0xad, 0x1f, 0xbe, 0xe3, 0x25, 0x54, 0x08, 0xec, 0xb6, 0xef, 0x80, 0xc9, + 0xbd, 0x23, 0x50, 0x97, 0x7e, 0xce, 0xc8, 0xab, 0xd8, 0xbe, 0x08, 0x3d, 0x9a, 0x3b, 0xf9, 0x59, + 0x1e, 0x39, 0x39, 0x2b, 0x67, 0x4e, 0xcf, 0xca, 0x99, 0x1f, 0x67, 0xe5, 0xcc, 0xeb, 0x5f, 0xe5, + 0x91, 0x83, 0x09, 0xf9, 0x97, 0x63, 0xed, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0x23, 0xd2, + 0x00, 0xca, 0x08, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index 6a48b643..fdc7ffd7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ 
b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -11,16 +11,15 @@ import ( math "math" + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + authpb "github.com/coreos/etcd/auth/authpb" - io "io" -) - -import mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - -import ( context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -15981,207 +15980,217 @@ var ( ) var fileDescriptorRpc = []byte{ - // 3228 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x3a, 0x4d, 0x73, 0x1b, 0xc7, - 0xb1, 0x5a, 0x00, 0x04, 0x88, 0x06, 0x08, 0x51, 0x43, 0x4a, 0x06, 0x57, 0x14, 0x45, 0x8d, 0x3e, - 0x6d, 0xd9, 0xc4, 0x33, 0xed, 0xf7, 0x0e, 0xef, 0xbd, 0x72, 0x15, 0x48, 0xc0, 0x32, 0x43, 0x9a, - 0x94, 0x97, 0x14, 0xed, 0x54, 0xb9, 0xc2, 0x5a, 0x02, 0x2b, 0x12, 0x45, 0x7c, 0x79, 0x77, 0x01, - 0x89, 0x4e, 0x52, 0x95, 0x72, 0xc5, 0x87, 0xe4, 0x18, 0x1f, 0x92, 0x38, 0xc7, 0xfc, 0x86, 0xdc, - 0xf2, 0x03, 0x52, 0xb9, 0xc4, 0x55, 0xf9, 0x03, 0xa9, 0x24, 0x87, 0x1c, 0x72, 0x4f, 0xe5, 0x90, - 0x4a, 0xe6, 0x73, 0x77, 0x76, 0x31, 0x0b, 0xd2, 0xd9, 0xf8, 0x20, 0x71, 0xa7, 0xa7, 0xa7, 0xbf, - 0x66, 0xba, 0xa7, 0xbb, 0x07, 0x50, 0x74, 0x87, 0xad, 0xb5, 0xa1, 0x3b, 0xf0, 0x07, 0xa8, 0xec, - 0xf8, 0xad, 0xb6, 0xe7, 0xb8, 0x63, 0xc7, 0x1d, 0x1e, 0x9b, 0x8b, 0x27, 0x83, 0x93, 0x01, 0x9b, - 0xa8, 0xd1, 0x2f, 0x8e, 0x63, 0x2e, 0x51, 0x9c, 0x5a, 0x6f, 0xdc, 0x6a, 0xb1, 0xff, 0x86, 0xc7, - 0xb5, 0xb3, 0xb1, 0x98, 0xba, 0xc9, 0xa6, 0xec, 0x91, 0x7f, 0xca, 0xfe, 0x23, 0x53, 0xf4, 0x8f, - 0x98, 0x5c, 0x3e, 0x19, 0x0c, 0x4e, 0xba, 0x4e, 0xcd, 0x1e, 0x76, 0x6a, 0x76, 0xbf, 0x3f, 0xf0, - 0x6d, 0xbf, 0x33, 0xe8, 0x7b, 0x7c, 0x16, 0x7f, 0x6e, 0x40, 0xc5, 0x72, 0xbc, 0x21, 0x81, 0x38, - 0xef, 0x39, 0x76, 0xdb, 0x71, 0xd1, 0x2d, 0x80, 0x56, 0x77, 0xe4, 0xf9, 0x8e, 0x7b, 0xd4, 0x69, - 0x57, 0x8d, 0x55, 0xe3, 0x51, 0xce, 0x2a, 0x0a, 0xc8, 0x56, 0x1b, 0xdd, 0x84, 0x62, 0xcf, 0xe9, - 0x1d, 0xf3, 0xd9, 0x0c, 0x9b, 0x9d, 0xe5, 0x00, 0x32, 0x69, 0xc2, 0xac, 0xeb, 0x8c, 0x3b, 0x1e, - 0xe1, 0x50, 0xcd, 0x92, 0xb9, 0xac, 0x15, 0x8c, 0xe9, 0x42, 0xd7, 0x7e, 0xee, 0x1f, 0x11, 0x32, - 0xbd, 0x6a, 0x8e, 0x2f, 0xa4, 0x80, 0x03, 0x32, 0xc6, 0x5f, 0x65, 0xa1, 0x6c, 0xd9, 0xfd, 0x13, - 0xc7, 0x72, 0x3e, 0x19, 0x39, 0x9e, 0x8f, 0xe6, 0x21, 0x7b, 0xe6, 0x9c, 0x33, 0xf6, 0x65, 0x8b, - 0x7e, 0xf2, 0xf5, 0x04, 0xe3, 0xc8, 0xe9, 0x73, 0xc6, 0x65, 0xba, 0x9e, 0x00, 0x9a, 0xfd, 0x36, - 0x5a, 0x84, 0x99, 0x6e, 0xa7, 0xd7, 0xf1, 0x05, 0x57, 0x3e, 0x88, 0x88, 0x93, 0x8b, 0x89, 0xb3, - 0x09, 0xe0, 0x0d, 0x5c, 0xff, 0x68, 0xe0, 0x12, 0xa5, 0xab, 0x33, 0x64, 0xb6, 0xb2, 0x7e, 0x6f, - 0x4d, 0xdd, 0x88, 0x35, 0x55, 0xa0, 0xb5, 0x7d, 0x82, 0xbc, 0x47, 0x71, 0xad, 0xa2, 0x27, 0x3f, - 0xd1, 0xbb, 0x50, 0x62, 0x44, 0x7c, 0xdb, 0x3d, 0x71, 0xfc, 0x6a, 0x9e, 0x51, 0xb9, 0x7f, 0x01, - 0x95, 0x03, 0x86, 0x6c, 0x31, 0xf6, 0xfc, 0x1b, 0x61, 0x28, 0x13, 0xfc, 0x8e, 0xdd, 0xed, 0x7c, - 0x6a, 0x1f, 0x77, 0x9d, 0x6a, 0x81, 0x10, 0x9a, 0xb5, 0x22, 0x30, 0xaa, 0x3f, 0x31, 0x83, 0x77, - 0x34, 0xe8, 0x77, 0xcf, 0xab, 0xb3, 0x0c, 0x61, 0x96, 0x02, 0xf6, 0xc8, 0x98, 0x6d, 0xda, 0x60, - 0xd4, 0xf7, 0xf9, 0x6c, 0x91, 0xcd, 0x16, 0x19, 0x84, 0x4e, 0xe3, 0x35, 0x28, 0x06, 0xf2, 0xa3, - 0x59, 0xc8, 0xed, 0xee, 0xed, 0x36, 0xe7, 0xaf, 0x20, 0x80, 0x7c, 0x7d, 0x7f, 0xb3, 0xb9, 0xdb, - 0x98, 0x37, 0x50, 0x09, 0x0a, 0x8d, 0x26, 0x1f, 0x64, 0xf0, 0x06, 0x40, 0x28, 0x29, 0x2a, 0x40, - 0x76, 0xbb, 0xf9, 0x6d, 0x82, 0x4f, 0x70, 0x0e, 0x9b, 0xd6, 
0xfe, 0xd6, 0xde, 0x2e, 0x59, 0x40, - 0x16, 0x6f, 0x5a, 0xcd, 0xfa, 0x41, 0x73, 0x3e, 0x43, 0x31, 0xde, 0xdf, 0x6b, 0xcc, 0x67, 0x51, - 0x11, 0x66, 0x0e, 0xeb, 0x3b, 0xcf, 0x9a, 0xf3, 0x39, 0xfc, 0x85, 0x01, 0x73, 0x42, 0x77, 0x7e, - 0xbe, 0xd0, 0xdb, 0x90, 0x3f, 0x65, 0x67, 0x8c, 0x6d, 0x6b, 0x69, 0x7d, 0x39, 0x66, 0xa8, 0xc8, - 0x39, 0xb4, 0x04, 0x2e, 0xb1, 0x4d, 0xf6, 0x6c, 0xec, 0x91, 0x1d, 0xcf, 0x92, 0x25, 0xf3, 0x6b, - 0xfc, 0xf0, 0xaf, 0x6d, 0x3b, 0xe7, 0x87, 0x76, 0x77, 0xe4, 0x58, 0x74, 0x12, 0x21, 0xc8, 0xf5, - 0x06, 0xae, 0xc3, 0x76, 0x7f, 0xd6, 0x62, 0xdf, 0xf4, 0x48, 0x30, 0x03, 0x88, 0x9d, 0xe7, 0x03, - 0xdc, 0x02, 0x78, 0x3a, 0xf2, 0x93, 0x4f, 0x19, 0x59, 0x35, 0xa6, 0x74, 0xc5, 0x09, 0xe3, 0x03, - 0x76, 0xbc, 0x1c, 0xdb, 0x73, 0x82, 0xe3, 0x45, 0x07, 0xe8, 0x15, 0x28, 0x0c, 0xc9, 0x79, 0x3a, - 0x3a, 0x1b, 0x33, 0x1e, 0xb3, 0x56, 0x9e, 0x0e, 0xb7, 0xc7, 0xb8, 0x0f, 0x25, 0xc6, 0x24, 0x95, - 0xde, 0xaf, 0x86, 0xd4, 0x33, 0x6c, 0xd9, 0xa4, 0xee, 0x92, 0xdf, 0xc7, 0x80, 0x1a, 0x4e, 0xd7, - 0xf1, 0x9d, 0x34, 0x2e, 0xa4, 0x68, 0x93, 0x8d, 0x68, 0xf3, 0x13, 0x03, 0x16, 0x22, 0xe4, 0x53, - 0xa9, 0x55, 0x85, 0x42, 0x9b, 0x11, 0xe3, 0x12, 0x64, 0x2d, 0x39, 0x44, 0x8f, 0x61, 0x56, 0x08, - 0xe0, 0x11, 0x09, 0xf4, 0xbb, 0x5d, 0xe0, 0x32, 0x79, 0xf8, 0xaf, 0x06, 0x14, 0x85, 0xa2, 0x7b, - 0x43, 0x54, 0x87, 0x39, 0x97, 0x0f, 0x8e, 0x98, 0x3e, 0x42, 0x22, 0x33, 0xd9, 0x13, 0xdf, 0xbb, - 0x62, 0x95, 0xc5, 0x12, 0x06, 0x46, 0xff, 0x07, 0x25, 0x49, 0x62, 0x38, 0xf2, 0x85, 0xc9, 0xab, - 0x51, 0x02, 0xe1, 0xc9, 0x21, 0xcb, 0x41, 0xa0, 0x13, 0x20, 0x3a, 0x80, 0x45, 0xb9, 0x98, 0x6b, - 0x23, 0xc4, 0xc8, 0x32, 0x2a, 0xab, 0x51, 0x2a, 0x93, 0x5b, 0x45, 0xa8, 0x21, 0xb1, 0x5e, 0x99, - 0xdc, 0x28, 0x42, 0x41, 0x40, 0xf1, 0xdf, 0x0c, 0x00, 0x69, 0x50, 0xa2, 0x6f, 0x03, 0x2a, 0xae, - 0x18, 0x45, 0x14, 0xbe, 0xa9, 0x55, 0x58, 0xec, 0xc3, 0x15, 0x6b, 0x4e, 0x2e, 0xe2, 0x2a, 0xbf, - 0x03, 0xe5, 0x80, 0x4a, 0xa8, 0xf3, 0x92, 0x46, 0xe7, 0x80, 0x42, 0x49, 0x2e, 0xa0, 0x5a, 0x7f, - 0x08, 0xd7, 0x83, 0xf5, 0x1a, 0xb5, 0xef, 0x4c, 0x51, 0x3b, 0x20, 0xb8, 0x20, 0x29, 0xa8, 0x8a, - 0x03, 0x8d, 0xdb, 0x1c, 0x8c, 0xbf, 0xcc, 0x42, 0x61, 0x73, 0xd0, 0x1b, 0xda, 0x2e, 0xdd, 0xa3, - 0x3c, 0x81, 0x8f, 0xba, 0x3e, 0x53, 0xb7, 0xb2, 0x7e, 0x37, 0xca, 0x41, 0xa0, 0xc9, 0xbf, 0x16, - 0x43, 0xb5, 0xc4, 0x12, 0xba, 0x58, 0x84, 0xe9, 0xcc, 0x25, 0x16, 0x8b, 0x20, 0x2d, 0x96, 0x48, - 0x5f, 0xca, 0x86, 0xbe, 0x64, 0x42, 0x81, 0x2c, 0x0c, 0xaf, 0x16, 0xa2, 0x8b, 0x04, 0x10, 0xd7, - 0xbd, 0xda, 0x72, 0x1d, 0x9b, 0xda, 0x43, 0x5e, 0x3f, 0x33, 0x02, 0xa7, 0xc2, 0x27, 0x2c, 0x79, - 0x0d, 0xdd, 0x85, 0x72, 0x6f, 0xd0, 0x0e, 0xf1, 0xf2, 0x02, 0xaf, 0x44, 0xa0, 0x01, 0xd2, 0x0d, - 0x19, 0x94, 0xe8, 0xbd, 0x50, 0x26, 0xb3, 0x7c, 0x88, 0xdf, 0x84, 0xb9, 0x88, 0xae, 0x34, 0xfc, - 0x36, 0x3f, 0x78, 0x56, 0xdf, 0xe1, 0xb1, 0xfa, 0x09, 0x0b, 0xcf, 0x16, 0x89, 0xd5, 0x24, 0xe4, - 0xef, 0x34, 0xf7, 0xf7, 0x49, 0x64, 0xff, 0xff, 0x60, 0x89, 0x08, 0xee, 0x4a, 0x4c, 0xbf, 0xa2, - 0xc4, 0x74, 0x43, 0xc6, 0xf4, 0x4c, 0x18, 0xd3, 0xb3, 0x1b, 0x15, 0x28, 0x73, 0x83, 0x1c, 0x8d, - 0xfa, 0x44, 0x30, 0xfc, 0x4b, 0x72, 0x2c, 0x0f, 0x5e, 0xf6, 0x65, 0xc4, 0xa9, 0x41, 0xa1, 0xc5, - 0x89, 0x93, 0x0d, 0xa2, 0x0e, 0x7c, 0x5d, 0x6b, 0x63, 0x4b, 0x62, 0xa1, 0x37, 0xa1, 0xe0, 0x8d, - 0x5a, 0x2d, 0xc7, 0x93, 0xf1, 0xfd, 0x95, 0x78, 0x0c, 0x11, 0x1e, 0x6e, 0x49, 0x3c, 0xba, 0xe4, - 0xb9, 0xdd, 0xe9, 0x8e, 0x58, 0xb4, 0x9f, 0xbe, 0x44, 0xe0, 0xe1, 0x9f, 0x1b, 0x50, 0x62, 0x52, - 0xa6, 0x0a, 0x5c, 0xcb, 0x50, 0x64, 0x32, 0x38, 0x6d, 0x11, 0xba, 0xc8, 0x0d, 0x1b, 
0x00, 0xd0, - 0xff, 0x90, 0xd0, 0x2a, 0xd6, 0xc9, 0xe8, 0x55, 0xd5, 0x93, 0x25, 0x92, 0x85, 0xa8, 0x78, 0x1b, - 0xae, 0x31, 0xab, 0xb4, 0x68, 0x56, 0x26, 0xed, 0xa8, 0xe6, 0x2d, 0x46, 0x2c, 0x6f, 0x21, 0x73, - 0xc3, 0xd3, 0x73, 0xaf, 0xd3, 0xb2, 0xbb, 0x42, 0x8a, 0x60, 0x8c, 0xbf, 0x05, 0x48, 0x25, 0x96, - 0x46, 0x5d, 0x3c, 0x07, 0xa5, 0xf7, 0x6c, 0xef, 0x54, 0x88, 0x84, 0x3f, 0x82, 0x32, 0x1f, 0xa6, - 0xb2, 0x21, 0xb9, 0xa7, 0x4f, 0x09, 0x15, 0x26, 0xf8, 0x9c, 0xc5, 0xbe, 0xf1, 0x35, 0xb8, 0xba, - 0xdf, 0xb7, 0x87, 0xde, 0xe9, 0x40, 0x06, 0x57, 0x9a, 0x95, 0xce, 0x87, 0xb0, 0x54, 0x1c, 0x1f, - 0xc2, 0x55, 0xd7, 0xe9, 0xd9, 0x9d, 0x7e, 0xa7, 0x7f, 0x72, 0x74, 0x7c, 0xee, 0x3b, 0x9e, 0x48, - 0x5a, 0x2b, 0x01, 0x78, 0x83, 0x42, 0xa9, 0x68, 0xc7, 0xdd, 0xc1, 0xb1, 0x70, 0x71, 0xf6, 0x8d, - 0x7f, 0x65, 0x40, 0xf9, 0x43, 0xdb, 0x6f, 0x49, 0x2b, 0xa0, 0x2d, 0xa8, 0x04, 0x8e, 0xcd, 0x20, - 0x42, 0x96, 0x58, 0x84, 0x67, 0x6b, 0x36, 0x85, 0xa3, 0xcb, 0x08, 0x3f, 0xd7, 0x52, 0x01, 0x8c, - 0x94, 0xdd, 0x6f, 0x39, 0xdd, 0x80, 0x54, 0x26, 0x99, 0x14, 0x43, 0x54, 0x49, 0xa9, 0x80, 0x8d, - 0xab, 0xe1, 0xed, 0xc7, 0xdd, 0xf2, 0xcb, 0x0c, 0xa0, 0x49, 0x19, 0xbe, 0x6e, 0x42, 0x70, 0x1f, - 0x2a, 0x1e, 0xf1, 0x76, 0xff, 0x28, 0x96, 0xd2, 0xcf, 0x31, 0x68, 0x10, 0x9c, 0x88, 0x85, 0x49, - 0x2d, 0x71, 0x42, 0x8e, 0xb4, 0x77, 0x44, 0xca, 0x8b, 0xce, 0xf3, 0x73, 0x91, 0x0d, 0x55, 0x24, - 0x78, 0x97, 0x41, 0x51, 0x93, 0x78, 0x6e, 0xa7, 0x4b, 0xd2, 0x7f, 0x8f, 0x44, 0xc3, 0x2c, 0x89, - 0xc0, 0x8f, 0x2f, 0xb2, 0xda, 0xda, 0xbb, 0x0c, 0xff, 0xe0, 0x7c, 0x48, 0x62, 0x86, 0x58, 0xab, - 0xe6, 0x29, 0xf9, 0x48, 0x9e, 0x72, 0x1f, 0x20, 0xc4, 0xa7, 0x51, 0x6b, 0x77, 0xef, 0xe9, 0xb3, - 0x03, 0x12, 0xd5, 0xca, 0x30, 0xbb, 0xbb, 0xd7, 0x68, 0xee, 0x34, 0x69, 0x5c, 0xc3, 0x35, 0x69, - 0x1b, 0xd5, 0x86, 0x68, 0x09, 0x66, 0x5f, 0x50, 0xa8, 0xac, 0x79, 0x48, 0x5e, 0xc2, 0xc6, 0x5b, - 0x6d, 0xfc, 0x17, 0x92, 0xc8, 0x8a, 0x53, 0x90, 0xea, 0x28, 0xaa, 0x2c, 0x32, 0x11, 0x16, 0x34, - 0x29, 0xe2, 0xa7, 0xa3, 0x2d, 0x72, 0x2f, 0x39, 0xa4, 0xee, 0xce, 0x37, 0x9b, 0x4c, 0x71, 0xb3, - 0x06, 0x63, 0x72, 0xcd, 0xcc, 0xb7, 0xb8, 0xbb, 0xc7, 0xee, 0x19, 0xeb, 0xaa, 0x80, 0x07, 0x9b, - 0x74, 0x1f, 0xf2, 0xce, 0xd8, 0xe9, 0xfb, 0x5e, 0xb5, 0xc4, 0x62, 0xd3, 0x9c, 0xcc, 0xac, 0x9a, - 0x14, 0x6a, 0x89, 0x49, 0xfc, 0xdf, 0x70, 0x6d, 0x87, 0xa6, 0xb6, 0x4f, 0xc8, 0x21, 0x50, 0x93, - 0xe4, 0x83, 0x83, 0x1d, 0x61, 0x95, 0xac, 0x7f, 0xb0, 0x83, 0x2a, 0x90, 0xd9, 0x6a, 0x08, 0x1d, - 0x32, 0x9d, 0x06, 0xfe, 0xcc, 0x00, 0xa4, 0xae, 0x4b, 0x65, 0xa6, 0x18, 0x71, 0xc9, 0x3e, 0x1b, - 0xb2, 0x27, 0xd9, 0xb8, 0xe3, 0xba, 0x03, 0x97, 0x19, 0xa4, 0x68, 0xf1, 0x01, 0xbe, 0x27, 0x64, - 0x20, 0x3a, 0x0f, 0xce, 0x82, 0x33, 0xcf, 0xa9, 0x19, 0x81, 0xa8, 0xdb, 0xb0, 0x10, 0xc1, 0x4a, - 0x15, 0x23, 0x1f, 0xc2, 0x75, 0x46, 0x6c, 0xdb, 0x71, 0x86, 0xf5, 0x6e, 0x67, 0x9c, 0xc8, 0x75, - 0x08, 0x37, 0xe2, 0x88, 0xdf, 0xac, 0x8d, 0xf0, 0x29, 0xe4, 0xdf, 0x67, 0x55, 0xb9, 0x22, 0x4b, - 0x8e, 0xe1, 0x92, 0x40, 0xd7, 0xb7, 0x7b, 0xbc, 0xc0, 0x29, 0x5a, 0xec, 0x9b, 0x5d, 0x2a, 0x8e, - 0xe3, 0x3e, 0xb3, 0x76, 0xf8, 0xe5, 0x55, 0xb4, 0x82, 0x31, 0x5a, 0xa1, 0xfd, 0x80, 0x0e, 0x39, - 0x1e, 0x6c, 0x36, 0xc7, 0x66, 0x15, 0x08, 0xa9, 0x2d, 0xe7, 0x39, 0xa7, 0x7a, 0xbb, 0xad, 0x5c, - 0x60, 0x01, 0x3d, 0x23, 0x4a, 0x0f, 0xbf, 0x80, 0x6b, 0x0a, 0x7e, 0x2a, 0x33, 0xbc, 0x0e, 0x79, - 0xde, 0x7a, 0x10, 0xb1, 0x73, 0x31, 0xba, 0x8a, 0xb3, 0xb1, 0x04, 0x0e, 0x89, 0x0f, 0x0b, 0x02, - 0xe2, 0xf4, 0x06, 0xba, 0xbd, 0x62, 0xf6, 0xc1, 0x3b, 0xb0, 0x18, 0x45, 0x4b, 0x75, 0x44, 0xea, - 0x92, 0xe9, 
0xb3, 0x61, 0x5b, 0x09, 0xc5, 0xf1, 0x4d, 0x51, 0x0d, 0x96, 0x89, 0x19, 0x2c, 0x10, - 0x48, 0x92, 0x48, 0x25, 0xd0, 0x82, 0x34, 0xff, 0x4e, 0xc7, 0x0b, 0x2e, 0xdc, 0x4f, 0x01, 0xa9, - 0xc0, 0x54, 0x9b, 0xb2, 0x06, 0x05, 0x6e, 0x70, 0x99, 0xd3, 0xe9, 0x77, 0x45, 0x22, 0x51, 0x81, - 0x1a, 0xce, 0x73, 0xd7, 0x3e, 0xe9, 0x39, 0x41, 0xcc, 0xa1, 0x99, 0x8c, 0x0a, 0x4c, 0xa5, 0xf1, - 0xef, 0xc8, 0x2d, 0x5e, 0xef, 0xda, 0x6e, 0x4f, 0x1a, 0xff, 0x1d, 0xc8, 0xf3, 0x14, 0x49, 0x94, - 0x11, 0x0f, 0xa2, 0x64, 0x54, 0x5c, 0x3e, 0xa8, 0xf3, 0x84, 0x4a, 0xac, 0xa2, 0x9b, 0x25, 0x3a, - 0x5e, 0x8d, 0x58, 0x07, 0xac, 0x81, 0xde, 0x80, 0x19, 0x9b, 0x2e, 0x61, 0xbe, 0x58, 0x89, 0x27, - 0xa7, 0x8c, 0x1a, 0xbb, 0xce, 0x38, 0x16, 0x7e, 0x1b, 0x4a, 0x0a, 0x07, 0x9a, 0x73, 0x3f, 0x69, - 0x8a, 0x2b, 0xab, 0xbe, 0x79, 0xb0, 0x75, 0xc8, 0x53, 0xf1, 0x0a, 0x40, 0xa3, 0x19, 0x8c, 0x33, - 0x24, 0x19, 0xe3, 0xab, 0x84, 0x87, 0xab, 0xf2, 0x18, 0x49, 0xf2, 0x64, 0x2e, 0x25, 0xcf, 0x4b, - 0x98, 0x13, 0xea, 0xa7, 0x3a, 0x03, 0x6f, 0x12, 0x0b, 0x53, 0x32, 0xf2, 0x08, 0x2c, 0x69, 0xd8, - 0x4a, 0xef, 0xe4, 0x88, 0x98, 0x24, 0x31, 0xfb, 0xbe, 0xed, 0x8f, 0x3c, 0x79, 0x04, 0x7e, 0x6b, - 0x40, 0x45, 0x42, 0xd2, 0x76, 0x1c, 0x64, 0xa5, 0xc6, 0x63, 0x5e, 0x50, 0xa7, 0xdd, 0x80, 0x7c, - 0xfb, 0x78, 0xbf, 0xf3, 0xa9, 0xec, 0xeb, 0x88, 0x11, 0x85, 0x77, 0x39, 0x1f, 0xde, 0xa7, 0x14, - 0x23, 0x5a, 0x02, 0xd0, 0x8e, 0xe5, 0x56, 0xbf, 0xed, 0xbc, 0x64, 0x37, 0x6d, 0xce, 0x0a, 0x01, - 0x2c, 0x6b, 0x17, 0xfd, 0x4c, 0x96, 0x99, 0xa8, 0xfd, 0x4d, 0x72, 0xc8, 0xeb, 0x23, 0xff, 0xb4, - 0xd9, 0xa7, 0xad, 0x3c, 0xa9, 0xe1, 0x22, 0x20, 0x0a, 0x6c, 0x74, 0x3c, 0x15, 0xda, 0x84, 0x05, - 0x0a, 0x25, 0xe7, 0x9e, 0xe4, 0xf4, 0x61, 0xc4, 0x90, 0x61, 0xdb, 0x88, 0x85, 0x6d, 0xdb, 0xf3, - 0x5e, 0x0c, 0xdc, 0xb6, 0x50, 0x2d, 0x18, 0xe3, 0x06, 0x27, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, - 0x4b, 0xe5, 0x51, 0x48, 0xe5, 0x89, 0xe3, 0x4f, 0xa1, 0x82, 0x1f, 0xc3, 0x75, 0x89, 0x29, 0x4a, - 0xf9, 0x29, 0xc8, 0x7b, 0x70, 0x4b, 0x22, 0x6f, 0x9e, 0xd2, 0x7c, 0xf3, 0xa9, 0x60, 0xf8, 0xef, - 0xca, 0xb9, 0x01, 0xd5, 0x40, 0x4e, 0x96, 0x83, 0x0c, 0xba, 0xaa, 0x00, 0x23, 0x4f, 0x9c, 0x19, - 0x42, 0x8b, 0x7e, 0x53, 0x98, 0x4b, 0x50, 0xe4, 0x25, 0x48, 0xbf, 0xf1, 0x26, 0x2c, 0x49, 0x1a, - 0x22, 0x3b, 0x88, 0x12, 0x99, 0x10, 0x48, 0x47, 0x44, 0x18, 0x8c, 0x2e, 0x9d, 0x6e, 0x76, 0x15, - 0x33, 0x6a, 0x5a, 0x46, 0xd3, 0x50, 0x68, 0x5e, 0xe7, 0x27, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0x01, - 0xa6, 0x04, 0x54, 0xb0, 0xd8, 0x08, 0x0a, 0x9e, 0xd8, 0x88, 0x09, 0xd2, 0x1f, 0xc3, 0x4a, 0x20, - 0x04, 0xb5, 0xdb, 0x53, 0x72, 0x58, 0x3b, 0x9e, 0xa7, 0xd4, 0xa2, 0x3a, 0xc5, 0x1f, 0x40, 0x6e, - 0xe8, 0x88, 0x98, 0x52, 0x5a, 0x47, 0x6b, 0xfc, 0xd5, 0x61, 0x4d, 0x59, 0xcc, 0xe6, 0x71, 0x1b, - 0x6e, 0x4b, 0xea, 0xdc, 0xa2, 0x5a, 0xf2, 0x71, 0xa1, 0x64, 0x9d, 0xc2, 0xcd, 0x3a, 0x59, 0xa7, - 0x64, 0xf9, 0xde, 0xcb, 0x3a, 0x85, 0xde, 0x15, 0xaa, 0x6f, 0xa5, 0xba, 0x2b, 0xb6, 0xb9, 0x4d, - 0x03, 0x97, 0x4c, 0x45, 0xec, 0x18, 0x16, 0xa3, 0x9e, 0x9c, 0x2a, 0x8c, 0x91, 0xac, 0xd7, 0x27, - 0x26, 0x94, 0x41, 0x8c, 0x0f, 0xa4, 0xc0, 0x81, 0x9b, 0xa7, 0x12, 0xd8, 0x0e, 0x89, 0xb1, 0x23, - 0x99, 0x56, 0x5e, 0xba, 0x9b, 0x32, 0x9f, 0xe1, 0x03, 0xbc, 0x0b, 0x37, 0xe2, 0x61, 0x22, 0x95, - 0xc8, 0x87, 0xfc, 0x00, 0xeb, 0x22, 0x49, 0x2a, 0xba, 0x1f, 0x84, 0xc1, 0x40, 0x09, 0x28, 0xa9, - 0x48, 0x5a, 0x60, 0xea, 0xe2, 0xcb, 0x7f, 0xe2, 0xbc, 0x06, 0xe1, 0x26, 0x15, 0x31, 0x2f, 0x24, - 0x96, 0x7e, 0xfb, 0xc3, 0x18, 0x91, 0x9d, 0x1a, 0x23, 0x84, 0x93, 0x84, 0x51, 0xec, 0x1b, 0x38, - 0x74, 0x82, 0x47, 0x18, 0x40, 0xd3, 
0xf2, 0xa0, 0x77, 0x48, 0xc0, 0x83, 0x0d, 0xe4, 0xc1, 0x56, - 0xc3, 0x6e, 0xaa, 0xcd, 0xf8, 0x30, 0x8c, 0x9d, 0x13, 0x91, 0x39, 0x15, 0xe1, 0x8f, 0x60, 0x35, - 0x39, 0x28, 0xa7, 0xa1, 0xfc, 0x1a, 0x86, 0x62, 0x90, 0x50, 0x2a, 0xaf, 0x8c, 0x25, 0x28, 0xec, - 0xee, 0xed, 0x3f, 0xad, 0x6f, 0x92, 0x54, 0x76, 0xfd, 0x1f, 0x59, 0xc8, 0x6c, 0x1f, 0xa2, 0xef, - 0xc0, 0x0c, 0x7f, 0x83, 0x98, 0xf2, 0x44, 0x63, 0x4e, 0x7b, 0xcd, 0xc0, 0xcb, 0x9f, 0xfd, 0xfe, - 0xcf, 0x5f, 0x64, 0x6e, 0xe0, 0x6b, 0xb5, 0xf1, 0x5b, 0x76, 0x77, 0x78, 0x6a, 0xd7, 0xce, 0xc6, - 0x35, 0x76, 0x27, 0xfc, 0xaf, 0xf1, 0x1a, 0x3a, 0x84, 0x2c, 0x7d, 0xa1, 0x48, 0x7c, 0xbf, 0x31, - 0x93, 0x5f, 0x39, 0xb0, 0xc9, 0x28, 0x2f, 0xe2, 0xab, 0x2a, 0xe5, 0xe1, 0xc8, 0xa7, 0x74, 0xc7, - 0x50, 0x52, 0x1e, 0x2a, 0xd0, 0x85, 0x2f, 0x3b, 0xe6, 0xc5, 0x8f, 0x20, 0x18, 0x33, 0x7e, 0xcb, - 0xf8, 0x15, 0x95, 0x1f, 0x7f, 0x4f, 0x51, 0xf5, 0x39, 0x78, 0xd9, 0x8f, 0xeb, 0x13, 0xb6, 0xde, - 0xe3, 0xfa, 0x28, 0xed, 0x6e, 0xbd, 0x3e, 0xfe, 0xcb, 0x3e, 0xa5, 0x3b, 0x10, 0x8f, 0x2b, 0x2d, - 0x1f, 0xdd, 0xd6, 0xf4, 0xea, 0xd5, 0xae, 0xb4, 0xb9, 0x9a, 0x8c, 0x20, 0x38, 0xdd, 0x61, 0x9c, - 0x6e, 0xe2, 0x1b, 0x2a, 0xa7, 0x56, 0x80, 0x47, 0x18, 0xae, 0x9f, 0xc2, 0x0c, 0xeb, 0xa5, 0xa1, - 0x23, 0xf9, 0x61, 0x6a, 0xba, 0x80, 0x09, 0x27, 0x20, 0xd2, 0x85, 0xc3, 0x4b, 0x8c, 0xdb, 0x02, - 0xae, 0x04, 0xdc, 0x58, 0x3b, 0x8d, 0x70, 0x79, 0x64, 0xfc, 0x97, 0xb1, 0xfe, 0xf7, 0x0c, 0xcc, - 0xb0, 0xa6, 0x0b, 0x1a, 0x02, 0x84, 0xdd, 0xa9, 0xb8, 0x9e, 0x13, 0xfd, 0xae, 0xb8, 0x9e, 0x93, - 0x8d, 0x2d, 0x7c, 0x9b, 0x71, 0x5e, 0xc2, 0x8b, 0x01, 0x67, 0xf6, 0x20, 0x5c, 0x3b, 0xa1, 0x58, - 0xd4, 0xac, 0x2f, 0xa0, 0xa4, 0x74, 0x99, 0x90, 0x8e, 0x62, 0xa4, 0x4d, 0x15, 0x3f, 0x26, 0x9a, - 0x16, 0x15, 0xbe, 0xcb, 0x98, 0xde, 0xc2, 0x55, 0xd5, 0xb8, 0x9c, 0xaf, 0xcb, 0x30, 0x29, 0xe3, - 0x1f, 0x92, 0xa2, 0x29, 0xda, 0x69, 0x42, 0x77, 0x35, 0xa4, 0xe3, 0x0d, 0x2b, 0xf3, 0xde, 0x74, - 0xa4, 0x44, 0x11, 0x38, 0xff, 0x33, 0x82, 0x69, 0x53, 0x4c, 0x69, 0xfb, 0x7f, 0xd2, 0x47, 0x3b, - 0xfe, 0x93, 0x11, 0xe4, 0x43, 0x31, 0xe8, 0xf7, 0xa0, 0x15, 0x5d, 0x2f, 0x20, 0x4c, 0x94, 0xcd, - 0xdb, 0x89, 0xf3, 0x42, 0x84, 0x07, 0x4c, 0x84, 0x55, 0x7c, 0x33, 0x10, 0x41, 0xfc, 0x34, 0xa5, - 0xc6, 0x4b, 0xde, 0x9a, 0xdd, 0x6e, 0x53, 0x43, 0xfc, 0x80, 0x14, 0xfd, 0x6a, 0x1b, 0x07, 0xdd, - 0xd1, 0x76, 0x21, 0xd4, 0x4e, 0x90, 0x89, 0xa7, 0xa1, 0x08, 0xfe, 0xaf, 0x32, 0xfe, 0x77, 0xf1, - 0x4a, 0x12, 0x7f, 0x97, 0xe1, 0x47, 0x45, 0xe0, 0x8d, 0x1b, 0xbd, 0x08, 0x91, 0xbe, 0x90, 0x5e, - 0x84, 0x68, 0xdf, 0xe7, 0x62, 0x11, 0x46, 0x0c, 0x9f, 0x8a, 0xf0, 0x12, 0x20, 0xec, 0xeb, 0x20, - 0xad, 0x71, 0x95, 0xd2, 0x21, 0x7e, 0xf2, 0x27, 0x5b, 0x42, 0xf8, 0x21, 0xe3, 0x7d, 0x07, 0x2f, - 0x27, 0xf1, 0xee, 0x12, 0x6c, 0xea, 0xe7, 0xbf, 0xce, 0x41, 0xe9, 0x7d, 0xbb, 0xd3, 0xf7, 0x9d, - 0x3e, 0x6d, 0x57, 0xa3, 0x13, 0x98, 0x61, 0x77, 0x43, 0xdc, 0xdd, 0xd5, 0x66, 0x4b, 0xdc, 0xdd, - 0x23, 0x9d, 0x08, 0x7c, 0x9f, 0xb1, 0xbe, 0x8d, 0xcd, 0x80, 0x75, 0x2f, 0xa4, 0x5f, 0x63, 0x5d, - 0x04, 0xaa, 0xf2, 0x19, 0xe4, 0x79, 0xd7, 0x00, 0xc5, 0xa8, 0x45, 0xba, 0x0b, 0xe6, 0xb2, 0x7e, - 0x32, 0xf1, 0x94, 0xa9, 0xbc, 0x3c, 0x86, 0x4c, 0x99, 0x7d, 0x17, 0x20, 0x6c, 0x53, 0xc5, 0xed, - 0x3b, 0xd1, 0xd5, 0x32, 0x57, 0x93, 0x11, 0x04, 0xe3, 0xd7, 0x18, 0xe3, 0x7b, 0xf8, 0xb6, 0x96, - 0x71, 0x3b, 0x58, 0x40, 0x99, 0xb7, 0x20, 0x47, 0x9f, 0xe4, 0x50, 0x2c, 0xf4, 0x2b, 0xaf, 0x76, - 0xa6, 0xa9, 0x9b, 0x12, 0xac, 0xee, 0x31, 0x56, 0x2b, 0x78, 0x49, 0xcb, 0x8a, 0x3e, 0xcd, 0x51, - 0x26, 0x23, 0x98, 0x95, 0x2f, 0x71, 0xe8, 0x56, 0xcc, 0x66, 
0xd1, 0x57, 0x3b, 0x73, 0x25, 0x69, - 0x5a, 0x30, 0x7c, 0xc4, 0x18, 0x62, 0x7c, 0x4b, 0x6f, 0x54, 0x81, 0x4e, 0x98, 0x92, 0x00, 0xf2, - 0xe3, 0x79, 0xc8, 0xd1, 0x2c, 0x85, 0xc6, 0xee, 0xb0, 0xb8, 0x8b, 0x5b, 0x78, 0xa2, 0xa5, 0x12, - 0xb7, 0xf0, 0x64, 0x5d, 0xa8, 0x89, 0xdd, 0xec, 0x87, 0x73, 0x0e, 0xc3, 0xa2, 0x1a, 0xfb, 0x50, - 0x52, 0x4a, 0x40, 0xa4, 0xa1, 0x18, 0x6d, 0xd8, 0xc4, 0x63, 0xb7, 0xa6, 0x7e, 0xc4, 0xab, 0x8c, - 0xa9, 0x89, 0xaf, 0x47, 0x99, 0xb6, 0x39, 0x1a, 0xe5, 0xfa, 0x3d, 0x28, 0xab, 0xb5, 0x22, 0xd2, - 0x10, 0x8d, 0x75, 0x84, 0xe2, 0xb1, 0x42, 0x57, 0x6a, 0x6a, 0x9c, 0x26, 0xf8, 0x99, 0xa0, 0xc4, - 0xa5, 0xdc, 0x3f, 0x81, 0x82, 0xa8, 0x20, 0x75, 0xfa, 0x46, 0x7b, 0x48, 0x3a, 0x7d, 0x63, 0xe5, - 0xa7, 0x26, 0x11, 0x60, 0x6c, 0x69, 0xa6, 0x2c, 0x03, 0xb4, 0x60, 0x49, 0x0a, 0x8d, 0x24, 0x96, - 0x61, 0x57, 0x24, 0x89, 0xa5, 0x52, 0xa5, 0x4c, 0x65, 0x79, 0xe2, 0xf8, 0xe2, 0x2c, 0xcb, 0x12, - 0x00, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x9e, 0x86, 0x92, 0x98, 0xbb, 0x85, 0x5c, 0x45, 0x28, 0x44, - 0xdf, 0x07, 0x08, 0xcb, 0xdd, 0xf8, 0x75, 0xac, 0xed, 0x99, 0xc5, 0xaf, 0x63, 0x7d, 0xc5, 0xac, - 0xf1, 0xe0, 0x90, 0x39, 0xcf, 0x1f, 0x29, 0xfb, 0x9f, 0x1a, 0x80, 0x26, 0xcb, 0x63, 0xf4, 0x58, - 0xcf, 0x42, 0xdb, 0x8e, 0x33, 0x5f, 0xbf, 0x1c, 0x72, 0x62, 0xf4, 0x0c, 0xe5, 0x6a, 0xb1, 0x25, - 0xc3, 0x17, 0x54, 0xb2, 0xcf, 0x0d, 0x98, 0x8b, 0x14, 0xd8, 0xe8, 0x41, 0xc2, 0x3e, 0xc7, 0x5a, - 0x7a, 0xe6, 0xc3, 0x0b, 0xf1, 0x12, 0x33, 0x16, 0xe5, 0x54, 0xc8, 0x6c, 0xed, 0x47, 0x24, 0x69, - 0x8a, 0x56, 0xe5, 0x28, 0x81, 0xc1, 0x44, 0x5f, 0xd0, 0x7c, 0x74, 0x31, 0xe2, 0x25, 0x76, 0x2b, - 0x4c, 0xe0, 0x88, 0x5b, 0x88, 0x62, 0x5e, 0xe7, 0x16, 0xd1, 0xb6, 0xa2, 0xce, 0x2d, 0x62, 0x9d, - 0x80, 0x24, 0xb7, 0xa0, 0x75, 0xb1, 0xe2, 0x89, 0xa2, 0xe4, 0x4f, 0x62, 0x39, 0xdd, 0x13, 0x63, - 0xfd, 0x82, 0xa9, 0x2c, 0x43, 0x4f, 0x94, 0x05, 0x3f, 0x4a, 0xa0, 0x78, 0x81, 0x27, 0xc6, 0xfb, - 0x05, 0x49, 0x9e, 0xc8, 0xb8, 0x2a, 0x9e, 0x18, 0xd6, 0xe7, 0x3a, 0x4f, 0x9c, 0x68, 0x9a, 0xea, - 0x3c, 0x71, 0xb2, 0xc4, 0x4f, 0xda, 0x5b, 0xc6, 0x3c, 0xe2, 0x89, 0x0b, 0x9a, 0x7a, 0x1e, 0xbd, - 0x9e, 0x60, 0x53, 0x6d, 0x43, 0xd6, 0x7c, 0xe3, 0x92, 0xd8, 0xd3, 0x3d, 0x80, 0xef, 0x86, 0xf4, - 0x80, 0x5f, 0x18, 0xb0, 0xa8, 0x6b, 0x08, 0xa0, 0x04, 0x66, 0x09, 0xdd, 0x5c, 0x73, 0xed, 0xb2, - 0xe8, 0x97, 0xb0, 0x5b, 0xe0, 0x13, 0x1b, 0xf3, 0xbf, 0xf9, 0xe3, 0x8a, 0xf1, 0x15, 0xf9, 0xf7, - 0x07, 0xf2, 0xef, 0x67, 0x7f, 0x5a, 0xb9, 0x72, 0x9c, 0x67, 0xbf, 0x5e, 0x7f, 0xeb, 0x5f, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x66, 0x40, 0x0b, 0xdf, 0x44, 0x2f, 0x00, 0x00, + // 3391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x92, 0x14, 0x29, 0x3e, 0x52, 0x14, 0x3d, 0x92, 0x6d, 0x6a, 0x6d, 0xcb, 0xf2, 0xf8, + 0x97, 0x6c, 0x27, 0x52, 0xa2, 0xe4, 0xfb, 0x3d, 0xb8, 0x41, 0x00, 0x59, 0x62, 0x2c, 0x55, 0xb2, + 0xe4, 0xac, 0x64, 0x27, 0x05, 0x82, 0x0a, 0x2b, 0x72, 0x2c, 0x2e, 0x44, 0xee, 0x32, 0xbb, 0x4b, + 0x5a, 0x4a, 0x5b, 0xa0, 0x48, 0x13, 0x14, 0xed, 0xb1, 0x39, 0xb4, 0x4d, 0x8f, 0x45, 0xff, 0x84, + 0xde, 0xfa, 0x07, 0x14, 0xbd, 0xb4, 0x40, 0xff, 0x81, 0x22, 0xed, 0xa1, 0x87, 0xde, 0x7b, 0x2a, + 0x5a, 0xcc, 0xaf, 0xdd, 0xd9, 0xe5, 0x2e, 0xe5, 0x74, 0x9b, 0x8b, 0xb5, 0x33, 0xf3, 0xe6, 0x7d, + 0xde, 0x7b, 0x33, 0xef, 0xcd, 0x9b, 0x37, 0x34, 0x94, 0xdd, 0x7e, 0x6b, 0xb9, 0xef, 0x3a, 0xbe, + 0x83, 0xaa, 0xc4, 0x6f, 0xb5, 0x3d, 0xe2, 0x0e, 0x89, 0xdb, 0x3f, 0xd2, 0xe7, 0x8e, 0x9d, 0x63, + 0x87, 0x0d, 0xac, 0xd0, 0x2f, 0x4e, 0xa3, 0xcf, 0x53, 0x9a, 
0x95, 0xde, 0xb0, 0xd5, 0x62, 0xff, + 0xf4, 0x8f, 0x56, 0x4e, 0x86, 0x62, 0xe8, 0x0a, 0x1b, 0x32, 0x07, 0x7e, 0x87, 0xfd, 0xd3, 0x3f, + 0x62, 0x7f, 0xc4, 0xe0, 0xd5, 0x63, 0xc7, 0x39, 0xee, 0x92, 0x15, 0xb3, 0x6f, 0xad, 0x98, 0xb6, + 0xed, 0xf8, 0xa6, 0x6f, 0x39, 0xb6, 0xc7, 0x47, 0xf1, 0xe7, 0x1a, 0xd4, 0x0c, 0xe2, 0xf5, 0x1d, + 0xdb, 0x23, 0x9b, 0xc4, 0x6c, 0x13, 0x17, 0x5d, 0x03, 0x68, 0x75, 0x07, 0x9e, 0x4f, 0xdc, 0x43, + 0xab, 0xdd, 0xd0, 0x16, 0xb5, 0xa5, 0x82, 0x51, 0x16, 0x3d, 0x5b, 0x6d, 0x74, 0x05, 0xca, 0x3d, + 0xd2, 0x3b, 0xe2, 0xa3, 0x39, 0x36, 0x3a, 0xc5, 0x3b, 0xb6, 0xda, 0x48, 0x87, 0x29, 0x97, 0x0c, + 0x2d, 0xcf, 0x72, 0xec, 0x46, 0x7e, 0x51, 0x5b, 0xca, 0x1b, 0x41, 0x9b, 0x4e, 0x74, 0xcd, 0x17, + 0xfe, 0xa1, 0x4f, 0xdc, 0x5e, 0xa3, 0xc0, 0x27, 0xd2, 0x8e, 0x03, 0xe2, 0xf6, 0xf0, 0x67, 0x93, + 0x50, 0x35, 0x4c, 0xfb, 0x98, 0x18, 0xe4, 0xe3, 0x01, 0xf1, 0x7c, 0x54, 0x87, 0xfc, 0x09, 0x39, + 0x63, 0xf0, 0x55, 0x83, 0x7e, 0xf2, 0xf9, 0xf6, 0x31, 0x39, 0x24, 0x36, 0x07, 0xae, 0xd2, 0xf9, + 0xf6, 0x31, 0x69, 0xda, 0x6d, 0x34, 0x07, 0x93, 0x5d, 0xab, 0x67, 0xf9, 0x02, 0x95, 0x37, 0x22, + 0xe2, 0x14, 0x62, 0xe2, 0xac, 0x03, 0x78, 0x8e, 0xeb, 0x1f, 0x3a, 0x6e, 0x9b, 0xb8, 0x8d, 0xc9, + 0x45, 0x6d, 0xa9, 0xb6, 0x7a, 0x6b, 0x59, 0x5d, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xc7, 0xf5, + 0xf7, 0x28, 0xad, 0x51, 0xf6, 0xe4, 0x27, 0x7a, 0x0f, 0x2a, 0x8c, 0x89, 0x6f, 0xba, 0xc7, 0xc4, + 0x6f, 0x14, 0x19, 0x97, 0xdb, 0xe7, 0x70, 0x39, 0x60, 0xc4, 0x06, 0x83, 0xe7, 0xdf, 0x08, 0x43, + 0xd5, 0x23, 0xae, 0x65, 0x76, 0xad, 0x4f, 0xcc, 0xa3, 0x2e, 0x69, 0x94, 0x16, 0xb5, 0xa5, 0x29, + 0x23, 0xd2, 0x47, 0xf5, 0x3f, 0x21, 0x67, 0xde, 0xa1, 0x63, 0x77, 0xcf, 0x1a, 0x53, 0x8c, 0x60, + 0x8a, 0x76, 0xec, 0xd9, 0xdd, 0x33, 0xb6, 0x68, 0xce, 0xc0, 0xf6, 0xf9, 0x68, 0x99, 0x8d, 0x96, + 0x59, 0x0f, 0x1b, 0x5e, 0x82, 0x7a, 0xcf, 0xb2, 0x0f, 0x7b, 0x4e, 0xfb, 0x30, 0x30, 0x08, 0x30, + 0x83, 0xd4, 0x7a, 0x96, 0xfd, 0xc4, 0x69, 0x1b, 0xd2, 0x2c, 0x94, 0xd2, 0x3c, 0x8d, 0x52, 0x56, + 0x04, 0xa5, 0x79, 0xaa, 0x52, 0x2e, 0xc3, 0x2c, 0xe5, 0xd9, 0x72, 0x89, 0xe9, 0x93, 0x90, 0xb8, + 0xca, 0x88, 0x2f, 0xf4, 0x2c, 0x7b, 0x9d, 0x8d, 0x44, 0xe8, 0xcd, 0xd3, 0x11, 0xfa, 0x69, 0x41, + 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x03, 0x9b, 0xa3, 0x29, 0x28, 0xec, 0xee, 0xed, 0x36, + 0xeb, 0x13, 0x08, 0xa0, 0xb8, 0xb6, 0xbf, 0xde, 0xdc, 0xdd, 0xa8, 0x6b, 0xa8, 0x02, 0xa5, 0x8d, + 0x26, 0x6f, 0xe4, 0xf0, 0x23, 0x80, 0xd0, 0xba, 0xa8, 0x04, 0xf9, 0xed, 0xe6, 0x77, 0xea, 0x13, + 0x94, 0xe6, 0x79, 0xd3, 0xd8, 0xdf, 0xda, 0xdb, 0xad, 0x6b, 0x74, 0xf2, 0xba, 0xd1, 0x5c, 0x3b, + 0x68, 0xd6, 0x73, 0x94, 0xe2, 0xc9, 0xde, 0x46, 0x3d, 0x8f, 0xca, 0x30, 0xf9, 0x7c, 0x6d, 0xe7, + 0x59, 0xb3, 0x5e, 0xc0, 0x5f, 0x68, 0x30, 0x2d, 0xd6, 0x8b, 0xfb, 0x04, 0x7a, 0x1b, 0x8a, 0x1d, + 0xe6, 0x17, 0x6c, 0x2b, 0x56, 0x56, 0xaf, 0xc6, 0x16, 0x37, 0xe2, 0x3b, 0x86, 0xa0, 0x45, 0x18, + 0xf2, 0x27, 0x43, 0xaf, 0x91, 0x5b, 0xcc, 0x2f, 0x55, 0x56, 0xeb, 0xcb, 0xdc, 0x61, 0x97, 0xb7, + 0xc9, 0xd9, 0x73, 0xb3, 0x3b, 0x20, 0x06, 0x1d, 0x44, 0x08, 0x0a, 0x3d, 0xc7, 0x25, 0x6c, 0xc7, + 0x4e, 0x19, 0xec, 0x9b, 0x6e, 0x63, 0xb6, 0x68, 0x62, 0xb7, 0xf2, 0x06, 0x6e, 0x01, 0x3c, 0x1d, + 0xf8, 0xe9, 0x9e, 0x31, 0x07, 0x93, 0x43, 0xca, 0x57, 0x78, 0x05, 0x6f, 0x30, 0x97, 0x20, 0xa6, + 0x47, 0x02, 0x97, 0xa0, 0x0d, 0x74, 0x19, 0x4a, 0x7d, 0x97, 0x0c, 0x0f, 0x4f, 0x86, 0x0c, 0x63, + 0xca, 0x28, 0xd2, 0xe6, 0xf6, 0x10, 0xdb, 0x50, 0x61, 0x20, 0x99, 0xf4, 0xbe, 0x17, 0x72, 0xcf, + 0xb1, 0x69, 0xa3, 0xba, 0x4b, 0xbc, 0x8f, 0x00, 0x6d, 0x90, 0x2e, 0xf1, 0x49, 0x16, 
0xb7, 0x57, + 0xb4, 0xc9, 0x47, 0xb4, 0xf9, 0x99, 0x06, 0xb3, 0x11, 0xf6, 0x99, 0xd4, 0x6a, 0x40, 0xa9, 0xcd, + 0x98, 0x71, 0x09, 0xf2, 0x86, 0x6c, 0xa2, 0x07, 0x30, 0x25, 0x04, 0xf0, 0x1a, 0xf9, 0x94, 0xd5, + 0x2e, 0x71, 0x99, 0x3c, 0xfc, 0x0f, 0x0d, 0xca, 0x42, 0xd1, 0xbd, 0x3e, 0x5a, 0x83, 0x69, 0x97, + 0x37, 0x0e, 0x99, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x55, 0x31, 0x85, 0x75, + 0xa3, 0x6f, 0x41, 0x45, 0xb2, 0xe8, 0x0f, 0x7c, 0x61, 0xf2, 0x46, 0x94, 0x41, 0xb8, 0x73, 0x36, + 0x27, 0x0c, 0x10, 0xe4, 0x4f, 0x07, 0x3e, 0x3a, 0x80, 0x39, 0x39, 0x99, 0x6b, 0x23, 0xc4, 0xc8, + 0x33, 0x2e, 0x8b, 0x51, 0x2e, 0xa3, 0x4b, 0xb5, 0x39, 0x61, 0x20, 0x31, 0x5f, 0x19, 0x7c, 0x54, + 0x86, 0x92, 0xe8, 0xc5, 0xff, 0xd4, 0x00, 0xa4, 0x41, 0xf7, 0xfa, 0x68, 0x03, 0x6a, 0xae, 0x68, + 0x45, 0x14, 0xbe, 0x92, 0xa8, 0xb0, 0x58, 0x87, 0x09, 0x63, 0x5a, 0x4e, 0xe2, 0x2a, 0xbf, 0x0b, + 0xd5, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0x8a, 0x9c, 0x40, 0xb5, 0xfe, 0x00, + 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, 0x0e, + 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xcb, 0x3c, 0x94, 0xd6, 0x9d, 0x5e, 0xdf, 0x74, 0xe9, 0x1a, 0x15, + 0x5d, 0xe2, 0x0d, 0xba, 0x3e, 0x53, 0xb7, 0xb6, 0x7a, 0x33, 0x8a, 0x20, 0xc8, 0xe4, 0x5f, 0x83, + 0x91, 0x1a, 0x62, 0x0a, 0x9d, 0x2c, 0x8e, 0x96, 0xdc, 0x2b, 0x4c, 0x16, 0x07, 0x8b, 0x98, 0x22, + 0x7d, 0x29, 0x1f, 0xfa, 0x92, 0x0e, 0xa5, 0x21, 0x71, 0xc3, 0xe3, 0x70, 0x73, 0xc2, 0x90, 0x1d, + 0xe8, 0x1e, 0xcc, 0xc4, 0x43, 0xf3, 0xa4, 0xa0, 0xa9, 0xb5, 0xa2, 0x91, 0xfc, 0x26, 0x54, 0x23, + 0xe7, 0x43, 0x51, 0xd0, 0x55, 0x7a, 0xca, 0xf1, 0x70, 0x49, 0x06, 0x25, 0x7a, 0x96, 0x55, 0x37, + 0x27, 0x44, 0x58, 0xc2, 0x6f, 0xc2, 0x74, 0x44, 0x57, 0x1a, 0x7e, 0x9b, 0xef, 0x3f, 0x5b, 0xdb, + 0xe1, 0xb1, 0xfa, 0x31, 0x0b, 0xcf, 0x46, 0x5d, 0xa3, 0x21, 0x7f, 0xa7, 0xb9, 0xbf, 0x5f, 0xcf, + 0xe1, 0x77, 0x82, 0x29, 0x22, 0xb8, 0x2b, 0x31, 0x7d, 0x42, 0x89, 0xe9, 0x9a, 0x8c, 0xe9, 0xb9, + 0x30, 0xa6, 0xe7, 0x1f, 0xd5, 0xa0, 0xca, 0x0d, 0x72, 0x38, 0xb0, 0xe9, 0xb9, 0xf2, 0x6b, 0x0d, + 0xe0, 0xe0, 0xd4, 0x96, 0x11, 0x67, 0x05, 0x4a, 0x2d, 0xce, 0xbc, 0xa1, 0x31, 0x07, 0xbe, 0x98, + 0x68, 0x63, 0x43, 0x52, 0xa1, 0x37, 0xa1, 0xe4, 0x0d, 0x5a, 0x2d, 0xe2, 0xc9, 0xf8, 0x7e, 0x39, + 0x1e, 0x43, 0x84, 0x87, 0x1b, 0x92, 0x8e, 0x4e, 0x79, 0x61, 0x5a, 0xdd, 0x01, 0x8b, 0xf6, 0xe3, + 0xa7, 0x08, 0x3a, 0xfc, 0x4b, 0x0d, 0x2a, 0x4c, 0xca, 0x4c, 0x81, 0xeb, 0x2a, 0x94, 0x99, 0x0c, + 0xa4, 0x2d, 0x42, 0xd7, 0x94, 0x11, 0x76, 0xa0, 0xff, 0x87, 0xb2, 0xdc, 0xb2, 0x32, 0x7a, 0x35, + 0x92, 0xd9, 0xee, 0xf5, 0x8d, 0x90, 0x14, 0x6f, 0xc3, 0x05, 0x66, 0x95, 0x16, 0xcd, 0x24, 0xa5, + 0x1d, 0xd5, 0x5c, 0x4b, 0x8b, 0xe5, 0x5a, 0x3a, 0x4c, 0xf5, 0x3b, 0x67, 0x9e, 0xd5, 0x32, 0xbb, + 0x42, 0x8a, 0xa0, 0x8d, 0xbf, 0x0d, 0x48, 0x65, 0x96, 0x45, 0x5d, 0x3c, 0x0d, 0x95, 0x4d, 0xd3, + 0xeb, 0x08, 0x91, 0xf0, 0x87, 0x50, 0xe5, 0xcd, 0x4c, 0x36, 0x44, 0x50, 0xe8, 0x98, 0x5e, 0x87, + 0x09, 0x3e, 0x6d, 0xb0, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x6d, 0xb3, 0xef, 0x75, 0x1c, 0x19, 0x5c, + 0x69, 0x26, 0x5d, 0x0f, 0xfb, 0x32, 0x21, 0xde, 0x85, 0x19, 0x97, 0xf4, 0x4c, 0xcb, 0xb6, 0xec, + 0xe3, 0xc3, 0xa3, 0x33, 0x9f, 0x78, 0x22, 0xd1, 0xae, 0x05, 0xdd, 0x8f, 0x68, 0x2f, 0x15, 0xed, + 0xa8, 0xeb, 0x1c, 0x09, 0x17, 0x67, 0xdf, 0xf8, 0xb7, 0x1a, 0x54, 0x3f, 0x30, 0xfd, 0x96, 0xb4, + 0x02, 0xda, 0x82, 0x5a, 0xe0, 0xd8, 0xac, 0x47, 0xc8, 0x12, 0x8b, 0xf0, 0x6c, 0x8e, 0x4c, 0xc1, + 0x64, 0x84, 0x9f, 0x6e, 0xa9, 0x1d, 0x8c, 0x95, 0x69, 0xb7, 0x48, 0x37, 0x60, 0x95, 0x4b, 0x67, + 0xc5, 0x08, 
0x55, 0x56, 0x6a, 0xc7, 0xa3, 0x99, 0xf0, 0xf4, 0xe3, 0x6e, 0xf9, 0x65, 0x0e, 0xd0, + 0xa8, 0x0c, 0x5f, 0x37, 0x21, 0xb8, 0x0d, 0x35, 0xcf, 0x37, 0x5d, 0xff, 0x30, 0x76, 0x0d, 0x99, + 0x66, 0xbd, 0x41, 0x70, 0xba, 0x0b, 0x33, 0x7d, 0xd7, 0x39, 0x76, 0x89, 0xe7, 0x1d, 0xda, 0x8e, + 0x6f, 0xbd, 0x38, 0x13, 0xd9, 0x50, 0x4d, 0x76, 0xef, 0xb2, 0x5e, 0xd4, 0x84, 0xd2, 0x0b, 0xab, + 0xeb, 0x13, 0xd7, 0x6b, 0x4c, 0x2e, 0xe6, 0x97, 0x6a, 0xab, 0x0f, 0xce, 0xb3, 0xda, 0xf2, 0x7b, + 0x8c, 0xfe, 0xe0, 0xac, 0x4f, 0x0c, 0x39, 0x57, 0xcd, 0x53, 0x8a, 0x91, 0x3c, 0xe5, 0x36, 0x40, + 0x48, 0x4f, 0xa3, 0xd6, 0xee, 0xde, 0xd3, 0x67, 0x07, 0xf5, 0x09, 0x54, 0x85, 0xa9, 0xdd, 0xbd, + 0x8d, 0xe6, 0x4e, 0x93, 0xc6, 0x35, 0xbc, 0x22, 0x6d, 0xa3, 0xda, 0x10, 0xcd, 0xc3, 0xd4, 0x4b, + 0xda, 0x2b, 0xef, 0x69, 0x79, 0xa3, 0xc4, 0xda, 0x5b, 0x6d, 0xfc, 0x77, 0x0d, 0xa6, 0xc5, 0x2e, + 0xc8, 0xb4, 0x15, 0x55, 0x88, 0x5c, 0x04, 0x82, 0x26, 0x45, 0x7c, 0x77, 0xb4, 0x45, 0xee, 0x25, + 0x9b, 0xd4, 0xdd, 0xf9, 0x62, 0x93, 0xb6, 0x30, 0x6b, 0xd0, 0x46, 0xf7, 0xa0, 0xde, 0xe2, 0xee, + 0x1e, 0x3b, 0x67, 0x8c, 0x19, 0xd1, 0x1f, 0x2c, 0xd2, 0x6d, 0x28, 0x92, 0x21, 0xb1, 0x7d, 0xaf, + 0x51, 0x61, 0xb1, 0x69, 0x5a, 0x66, 0x56, 0x4d, 0xda, 0x6b, 0x88, 0x41, 0xfc, 0x7f, 0x70, 0x61, + 0x87, 0xa6, 0xb6, 0x8f, 0x5d, 0xd3, 0x56, 0x93, 0xe4, 0x83, 0x83, 0x1d, 0x61, 0x95, 0xbc, 0x7f, + 0xb0, 0x83, 0x6a, 0x90, 0xdb, 0xda, 0x10, 0x3a, 0xe4, 0xac, 0x0d, 0xfc, 0xa9, 0x06, 0x48, 0x9d, + 0x97, 0xc9, 0x4c, 0x31, 0xe6, 0x12, 0x3e, 0x1f, 0xc2, 0xcf, 0xc1, 0x24, 0x71, 0x5d, 0xc7, 0x65, + 0x06, 0x29, 0x1b, 0xbc, 0x81, 0x6f, 0x09, 0x19, 0x0c, 0x32, 0x74, 0x4e, 0x82, 0x3d, 0xcf, 0xb9, + 0x69, 0x81, 0xa8, 0xdb, 0x30, 0x1b, 0xa1, 0xca, 0x14, 0x23, 0xef, 0xc2, 0x45, 0xc6, 0x6c, 0x9b, + 0x90, 0xfe, 0x5a, 0xd7, 0x1a, 0xa6, 0xa2, 0xf6, 0xe1, 0x52, 0x9c, 0xf0, 0x9b, 0xb5, 0x11, 0x7e, + 0x47, 0x20, 0x1e, 0x58, 0x3d, 0x72, 0xe0, 0xec, 0xa4, 0xcb, 0x46, 0x03, 0x1f, 0xbd, 0xfa, 0x8a, + 0xc3, 0x84, 0x7d, 0xe3, 0xdf, 0x68, 0x70, 0x79, 0x64, 0xfa, 0x37, 0xbc, 0xaa, 0x0b, 0x00, 0xc7, + 0x74, 0xfb, 0x90, 0x36, 0x1d, 0xe0, 0x97, 0x36, 0xa5, 0x27, 0x90, 0x93, 0xc6, 0x8e, 0xaa, 0x90, + 0xb3, 0x03, 0xc5, 0x27, 0xac, 0x5e, 0xa2, 0x68, 0x55, 0x90, 0x5a, 0xd9, 0x66, 0x8f, 0x5f, 0xe3, + 0xca, 0x06, 0xfb, 0x66, 0x47, 0x27, 0x21, 0xee, 0x33, 0x63, 0x87, 0x1f, 0xd1, 0x65, 0x23, 0x68, + 0x53, 0xf4, 0x56, 0xd7, 0x22, 0xb6, 0xcf, 0x46, 0x0b, 0x6c, 0x54, 0xe9, 0xc1, 0xcb, 0x50, 0xe7, + 0x48, 0x6b, 0xed, 0xb6, 0x72, 0x4c, 0x07, 0xfc, 0xb4, 0x28, 0x3f, 0xfc, 0x12, 0x2e, 0x28, 0xf4, + 0x99, 0x4c, 0xf7, 0x1a, 0x14, 0x79, 0x51, 0x48, 0x9c, 0x10, 0x73, 0xd1, 0x59, 0x1c, 0xc6, 0x10, + 0x34, 0xf8, 0x36, 0xcc, 0x8a, 0x1e, 0xd2, 0x73, 0x92, 0x56, 0x9d, 0xd9, 0x07, 0xef, 0xc0, 0x5c, + 0x94, 0x2c, 0x93, 0x23, 0xac, 0x49, 0xd0, 0x67, 0xfd, 0xb6, 0x72, 0xe0, 0xc4, 0x17, 0x45, 0x35, + 0x58, 0x2e, 0x66, 0xb0, 0x40, 0x20, 0xc9, 0x22, 0x93, 0x40, 0xb3, 0xd2, 0xfc, 0x3b, 0x96, 0x17, + 0xa4, 0x15, 0x9f, 0x00, 0x52, 0x3b, 0x33, 0x2d, 0xca, 0x32, 0x94, 0xb8, 0xc1, 0x65, 0xe6, 0x9a, + 0xbc, 0x2a, 0x92, 0x88, 0x0a, 0xb4, 0x41, 0x5e, 0xb8, 0xe6, 0x71, 0x8f, 0x04, 0x91, 0x95, 0xe6, + 0x6b, 0x6a, 0x67, 0x26, 0x8d, 0xff, 0xa8, 0x41, 0x75, 0xad, 0x6b, 0xba, 0x3d, 0x69, 0xfc, 0x77, + 0xa1, 0xc8, 0x13, 0x41, 0x71, 0x59, 0xba, 0x13, 0x65, 0xa3, 0xd2, 0xf2, 0xc6, 0x1a, 0x4f, 0x1b, + 0xc5, 0x2c, 0xba, 0x58, 0xa2, 0x16, 0xb9, 0x11, 0xab, 0x4d, 0x6e, 0xa0, 0xd7, 0x61, 0xd2, 0xa4, + 0x53, 0x98, 0xff, 0xd6, 0xe2, 0x29, 0x38, 0xe3, 0xc6, 0x0e, 0x6d, 0x4e, 0x85, 0xdf, 0x86, 0x8a, + 0x82, 0x40, 0x6f, 0x16, 0x8f, 0x9b, 
0xe2, 0x60, 0x5e, 0x5b, 0x3f, 0xd8, 0x7a, 0xce, 0x2f, 0x1c, + 0x35, 0x80, 0x8d, 0x66, 0xd0, 0xce, 0xe1, 0x0f, 0xc5, 0x2c, 0xe1, 0xe1, 0xaa, 0x3c, 0x5a, 0x9a, + 0x3c, 0xb9, 0x57, 0x92, 0xe7, 0x14, 0xa6, 0x85, 0xfa, 0x99, 0xf6, 0xc0, 0x9b, 0x50, 0x64, 0xfc, + 0xe4, 0x16, 0x98, 0x4f, 0x80, 0x95, 0xde, 0xc9, 0x09, 0xf1, 0x0c, 0x4c, 0xef, 0xfb, 0xa6, 0x3f, + 0xf0, 0xe4, 0x16, 0xf8, 0x83, 0x06, 0x35, 0xd9, 0x93, 0xb5, 0xae, 0x22, 0xef, 0xa3, 0x3c, 0xe6, + 0x05, 0xb7, 0xd1, 0x4b, 0x50, 0x6c, 0x1f, 0xed, 0x5b, 0x9f, 0xc8, 0xea, 0x95, 0x68, 0xd1, 0xfe, + 0x2e, 0xc7, 0xe1, 0x15, 0x64, 0xd1, 0xa2, 0x17, 0x1d, 0xd7, 0x7c, 0xe1, 0x6f, 0xd9, 0x6d, 0x72, + 0xca, 0xf2, 0x89, 0x82, 0x11, 0x76, 0xb0, 0xbb, 0x89, 0xa8, 0x34, 0xb3, 0xfc, 0x4b, 0xad, 0x3c, + 0xcf, 0xc2, 0x85, 0xb5, 0x81, 0xdf, 0x69, 0xda, 0xe6, 0x51, 0x57, 0x06, 0x01, 0x3c, 0x07, 0x88, + 0x76, 0x6e, 0x58, 0x9e, 0xda, 0xdb, 0x84, 0x59, 0xda, 0x4b, 0x6c, 0xdf, 0x6a, 0x29, 0x11, 0x43, + 0x86, 0x6d, 0x2d, 0x16, 0xb6, 0x4d, 0xcf, 0x7b, 0xe9, 0xb8, 0x6d, 0xa1, 0x5a, 0xd0, 0xc6, 0x1b, + 0x9c, 0xf9, 0x33, 0x2f, 0x12, 0x98, 0xbf, 0x2e, 0x97, 0xa5, 0x90, 0xcb, 0x63, 0xe2, 0x8f, 0xe1, + 0x82, 0x1f, 0xc0, 0x45, 0x49, 0x29, 0x0a, 0x16, 0x63, 0x88, 0xf7, 0xe0, 0x9a, 0x24, 0x5e, 0xef, + 0xd0, 0xac, 0xfa, 0xa9, 0x00, 0xfc, 0x6f, 0xe5, 0x7c, 0x04, 0x8d, 0x40, 0x4e, 0x96, 0x69, 0x39, + 0x5d, 0x55, 0x80, 0x81, 0x27, 0xf6, 0x4c, 0xd9, 0x60, 0xdf, 0xb4, 0xcf, 0x75, 0xba, 0xc1, 0x21, + 0x48, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1e, 0x22, 0x07, 0x8a, 0x32, 0x19, 0x11, 0x28, 0x89, 0x89, + 0x30, 0x18, 0x9d, 0x3a, 0xde, 0xec, 0x2a, 0x65, 0xd4, 0xb4, 0x8c, 0xa7, 0xa6, 0xf0, 0xbc, 0xc8, + 0x77, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0xa2, 0x9b, 0x32, 0x50, 0xbb, 0xc5, 0x42, 0xd0, 0xee, 0x91, + 0x85, 0x18, 0x61, 0xfd, 0x11, 0x2c, 0x04, 0x42, 0x50, 0xbb, 0x3d, 0x25, 0x6e, 0xcf, 0xf2, 0x3c, + 0xe5, 0xc6, 0x9d, 0xa4, 0xf8, 0x1d, 0x28, 0xf4, 0x89, 0x88, 0x29, 0x95, 0x55, 0xb4, 0xcc, 0xdf, + 0x83, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0xb7, 0xe1, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, + 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, + 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x08, 0xe6, 0xa2, + 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0xa4, 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, + 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, + 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, + 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0x7f, + 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, + 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, 0x0d, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, + 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, + 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, + 0x83, 0x84, 0x52, 0x79, 0x4b, 0xad, 0x40, 0x69, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, + 0xad, 0xfe, 0x2b, 0x0f, 0xb9, 0xed, 0xe7, 0xe8, 0xbb, 0x30, 0xc9, 0x5f, 0x5a, 0xc6, 0x3c, 0x44, + 0xe9, 0xe3, 0xde, 0x6c, 0xf0, 0xd5, 0x4f, 0xff, 0xfc, 0xb7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 0x95, + 0xe1, 0x5b, 0x66, 0xb7, 0xdf, 0x31, 0x57, 0x4e, 0x86, 0x2b, 0xec, 0x4c, 0x78, 0xa8, 0xdd, 0x47, + 0xcf, 0x21, 0xff, 0x74, 0xe0, 0xa3, 0xd4, 0x57, 0x2a, 0x3d, 0xfd, 0x2d, 0x07, 0xeb, 0x8c, 0xf3, + 0x1c, 0x9e, 0x51, 0x39, 0xf7, 0x07, 0x3e, 0xe5, 0x3b, 0x84, 
0x8a, 0xf2, 0x1c, 0x83, 0xce, 0x7d, + 0xbf, 0xd2, 0xcf, 0x7f, 0xea, 0xc1, 0x98, 0xe1, 0x5d, 0xc5, 0x97, 0x55, 0x3c, 0xfe, 0x6a, 0xa4, + 0xea, 0x73, 0x70, 0x6a, 0xc7, 0xf5, 0x09, 0x1f, 0x18, 0xe2, 0xfa, 0x28, 0x45, 0xfd, 0x64, 0x7d, + 0xfc, 0x53, 0x9b, 0xf2, 0x75, 0xc4, 0x13, 0x52, 0xcb, 0x47, 0xd7, 0x13, 0x5e, 0x24, 0xd4, 0xda, + 0xbb, 0xbe, 0x98, 0x4e, 0x20, 0x90, 0x6e, 0x30, 0xa4, 0x2b, 0xf8, 0x92, 0x8a, 0xd4, 0x0a, 0xe8, + 0x1e, 0x6a, 0xf7, 0x57, 0x3b, 0x30, 0xc9, 0x2a, 0x86, 0xe8, 0x50, 0x7e, 0xe8, 0x09, 0xb5, 0xce, + 0x94, 0x1d, 0x10, 0xa9, 0x35, 0xe2, 0x79, 0x86, 0x36, 0x8b, 0x6b, 0x01, 0x1a, 0x2b, 0x1a, 0x3e, + 0xd4, 0xee, 0x2f, 0x69, 0x6f, 0x68, 0xab, 0x3f, 0x2a, 0xc0, 0x24, 0xab, 0xd4, 0xa0, 0x3e, 0x40, + 0x58, 0x83, 0x8b, 0xeb, 0x39, 0x52, 0xd5, 0x8b, 0xeb, 0x39, 0x5a, 0xbe, 0xc3, 0xd7, 0x19, 0xf2, + 0x3c, 0x9e, 0x0b, 0x90, 0xd9, 0xb3, 0xf7, 0x0a, 0xab, 0xc9, 0x50, 0xb3, 0xbe, 0x84, 0x8a, 0x52, + 0x4b, 0x43, 0x49, 0x1c, 0x23, 0xc5, 0xb8, 0xf8, 0x36, 0x49, 0x28, 0xc4, 0xe1, 0x9b, 0x0c, 0xf4, + 0x1a, 0x6e, 0xa8, 0xc6, 0xe5, 0xb8, 0x2e, 0xa3, 0xa4, 0xc0, 0x9f, 0x69, 0x50, 0x8b, 0xd6, 0xd3, + 0xd0, 0xcd, 0x04, 0xd6, 0xf1, 0xb2, 0x9c, 0x7e, 0x6b, 0x3c, 0x51, 0xaa, 0x08, 0x1c, 0xff, 0x84, + 0x90, 0xbe, 0x49, 0x29, 0x85, 0xed, 0xd1, 0x8f, 0x35, 0x98, 0x89, 0x55, 0xc9, 0x50, 0x12, 0xc4, + 0x48, 0x0d, 0x4e, 0xbf, 0x7d, 0x0e, 0x95, 0x90, 0xe4, 0x2e, 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, + 0x0c, 0xdf, 0xea, 0x11, 0xdf, 0x11, 0xd2, 0xac, 0xfe, 0x3b, 0x0f, 0xa5, 0x75, 0xfe, 0xb3, 0x22, + 0xe4, 0x43, 0x39, 0xa8, 0x3c, 0xa1, 0x85, 0xa4, 0xaa, 0x44, 0x98, 0xb2, 0xeb, 0xd7, 0x53, 0xc7, + 0x85, 0x08, 0x77, 0x98, 0x08, 0x8b, 0xf8, 0x4a, 0x20, 0x82, 0xf8, 0xf9, 0xd2, 0x0a, 0xbf, 0x7c, + 0xaf, 0x98, 0xed, 0x36, 0x5d, 0x92, 0x1f, 0x6a, 0x50, 0x55, 0x0b, 0x4a, 0xe8, 0x46, 0x62, 0x3d, + 0x44, 0xad, 0x49, 0xe9, 0x78, 0x1c, 0x89, 0xc0, 0xbf, 0xc7, 0xf0, 0x6f, 0xe2, 0x85, 0x34, 0x7c, + 0x97, 0xd1, 0x47, 0x45, 0xe0, 0x25, 0xa4, 0x64, 0x11, 0x22, 0x15, 0xaa, 0x64, 0x11, 0xa2, 0x15, + 0xa8, 0xf3, 0x45, 0x18, 0x30, 0x7a, 0x2a, 0xc2, 0x29, 0x40, 0x58, 0x61, 0x42, 0x89, 0xc6, 0x55, + 0x2e, 0x31, 0x71, 0x1f, 0x1c, 0x2d, 0x4e, 0x25, 0xec, 0x80, 0x18, 0x76, 0xd7, 0xf2, 0xa8, 0x2f, + 0xae, 0xfe, 0xae, 0x00, 0x95, 0x27, 0xa6, 0x65, 0xfb, 0xc4, 0x36, 0xed, 0x16, 0x41, 0xc7, 0x30, + 0xc9, 0x4e, 0xa9, 0x78, 0xe0, 0x51, 0xcb, 0x3e, 0xf1, 0xc0, 0x13, 0xa9, 0x89, 0xe0, 0xdb, 0x0c, + 0xfa, 0x3a, 0xd6, 0x03, 0xe8, 0x5e, 0xc8, 0x7f, 0x85, 0xd5, 0x33, 0xa8, 0xca, 0x27, 0x50, 0xe4, + 0xf5, 0x0b, 0x14, 0xe3, 0x16, 0xa9, 0x73, 0xe8, 0x57, 0x93, 0x07, 0x53, 0x77, 0x99, 0x8a, 0xe5, + 0x31, 0x62, 0x0a, 0xf6, 0x3d, 0x80, 0xb0, 0x60, 0x16, 0xb7, 0xef, 0x48, 0x7d, 0x4d, 0x5f, 0x4c, + 0x27, 0x10, 0xc0, 0xf7, 0x19, 0xf0, 0x2d, 0x7c, 0x3d, 0x11, 0xb8, 0x1d, 0x4c, 0xa0, 0xe0, 0x2d, + 0x28, 0x6c, 0x9a, 0x5e, 0x07, 0xc5, 0x0e, 0x21, 0xe5, 0x95, 0x54, 0xd7, 0x93, 0x86, 0x04, 0xd4, + 0x2d, 0x06, 0xb5, 0x80, 0xe7, 0x13, 0xa1, 0x3a, 0xa6, 0x47, 0x63, 0x3a, 0x1a, 0xc0, 0x94, 0x7c, + 0xf9, 0x44, 0xd7, 0x62, 0x36, 0x8b, 0xbe, 0x92, 0xea, 0x0b, 0x69, 0xc3, 0x02, 0x70, 0x89, 0x01, + 0x62, 0x7c, 0x2d, 0xd9, 0xa8, 0x82, 0xfc, 0xa1, 0x76, 0xff, 0x0d, 0x6d, 0xf5, 0xa7, 0x75, 0x28, + 0xd0, 0x7c, 0x89, 0x9e, 0x22, 0xe1, 0x35, 0x33, 0x6e, 0xe1, 0x91, 0xe2, 0x4e, 0xdc, 0xc2, 0xa3, + 0x37, 0xd4, 0x84, 0x53, 0x84, 0xfd, 0xb8, 0x92, 0x30, 0x2a, 0xaa, 0xb1, 0x0f, 0x15, 0xe5, 0x32, + 0x8a, 0x12, 0x38, 0x46, 0x4b, 0x47, 0xf1, 0x53, 0x24, 0xe1, 0x26, 0x8b, 0x17, 0x19, 0xa8, 0x8e, + 0x2f, 0x46, 0x41, 0xdb, 0x9c, 0x8c, 0xa2, 0x7e, 0x1f, 0xaa, 0xea, 0xad, 0x15, 0x25, 
0x30, 0x8d, + 0xd5, 0xa6, 0xe2, 0xb1, 0x22, 0xe9, 0xd2, 0x9b, 0xe0, 0x34, 0xc1, 0x4f, 0x49, 0x25, 0x2d, 0x45, + 0xff, 0x18, 0x4a, 0xe2, 0x2e, 0x9b, 0xa4, 0x6f, 0xb4, 0x9a, 0x95, 0xa4, 0x6f, 0xec, 0x22, 0x9c, + 0x90, 0x92, 0x30, 0x58, 0x9a, 0xb3, 0xcb, 0x00, 0x2d, 0x20, 0x1f, 0x13, 0x3f, 0x0d, 0x32, 0xac, + 0xcf, 0xa4, 0x41, 0x2a, 0xf7, 0xa5, 0xb1, 0x90, 0xc7, 0xc4, 0x17, 0x7b, 0x59, 0x5e, 0x46, 0x50, + 0x0a, 0x47, 0x35, 0x1a, 0xe2, 0x71, 0x24, 0xa9, 0x59, 0x64, 0x88, 0x2a, 0x42, 0x21, 0xfa, 0x01, + 0x40, 0x78, 0xf1, 0x8e, 0x27, 0x06, 0x89, 0xd5, 0xbb, 0x78, 0x62, 0x90, 0x7c, 0x77, 0x4f, 0xf0, + 0xe0, 0x10, 0x9c, 0x67, 0xb2, 0x14, 0xfe, 0xe7, 0x1a, 0xa0, 0xd1, 0x8b, 0x3a, 0x7a, 0x90, 0x0c, + 0x91, 0x58, 0x18, 0xd4, 0x5f, 0x7b, 0x35, 0xe2, 0xd4, 0xe8, 0x19, 0xca, 0xd5, 0x62, 0x53, 0xfa, + 0x2f, 0xa9, 0x64, 0x9f, 0x6b, 0x30, 0x1d, 0xb9, 0xea, 0xa3, 0x3b, 0x29, 0xeb, 0x1c, 0x2b, 0x2e, + 0xea, 0x77, 0xcf, 0xa5, 0x4b, 0xcd, 0x9d, 0x94, 0x5d, 0x21, 0xf3, 0xc6, 0x9f, 0x68, 0x50, 0x8b, + 0xd6, 0x07, 0x50, 0x0a, 0xc0, 0x48, 0x85, 0x52, 0x5f, 0x3a, 0x9f, 0xf0, 0x15, 0x56, 0x2b, 0x4c, + 0x25, 0x3f, 0x86, 0x92, 0x28, 0x2b, 0x24, 0xb9, 0x45, 0xb4, 0xc0, 0x99, 0xe4, 0x16, 0xb1, 0x9a, + 0x44, 0x9a, 0x5b, 0xd0, 0x1b, 0xba, 0xe2, 0x89, 0xa2, 0xf8, 0x90, 0x06, 0x39, 0xde, 0x13, 0x63, + 0x95, 0x8b, 0xb1, 0x90, 0xa1, 0x27, 0xca, 0xd2, 0x03, 0x4a, 0xe1, 0x78, 0x8e, 0x27, 0xc6, 0x2b, + 0x17, 0x69, 0x9e, 0xc8, 0x50, 0x15, 0x4f, 0x0c, 0x2b, 0x05, 0x49, 0x9e, 0x38, 0x52, 0xbe, 0x4d, + 0xf2, 0xc4, 0xd1, 0x62, 0x43, 0xda, 0xda, 0x32, 0xf0, 0x88, 0x27, 0xce, 0x26, 0x54, 0x16, 0xd0, + 0x6b, 0x29, 0x36, 0x4d, 0x2c, 0x0d, 0xeb, 0xaf, 0xbf, 0x22, 0xf5, 0x78, 0x0f, 0xe0, 0xab, 0x21, + 0x3d, 0xe0, 0x57, 0x1a, 0xcc, 0x25, 0x95, 0x26, 0x50, 0x0a, 0x58, 0x4a, 0x5d, 0x59, 0x5f, 0x7e, + 0x55, 0xf2, 0x57, 0xb0, 0x5b, 0xe0, 0x13, 0x8f, 0xea, 0xbf, 0xff, 0x6a, 0x41, 0xfb, 0xd3, 0x57, + 0x0b, 0xda, 0x5f, 0xbe, 0x5a, 0xd0, 0x7e, 0xf1, 0xd7, 0x85, 0x89, 0xa3, 0x22, 0xfb, 0x1f, 0x0e, + 0x6f, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x17, 0x62, 0x2c, 0x4e, 0x68, 0x31, 0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index 62731e2d..12c03832 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -20,9 +20,9 @@ import ( proto "github.com/golang/protobuf/proto" math "math" -) -import io "io" + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -708,24 +708,24 @@ var ( ) var fileDescriptorKv = []byte{ - // 298 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x90, 0xcf, 0x4a, 0xc3, 0x40, - 0x10, 0xc6, 0xbb, 0x4d, 0x9b, 0xd6, 0x69, 0xa9, 0x61, 0x29, 0xb8, 0x78, 0x08, 0x31, 0x17, 0x15, - 0x21, 0x42, 0x7d, 0x03, 0x31, 0xa7, 0x7a, 0x90, 0x10, 0xbd, 0x96, 0x34, 0x0e, 0xa5, 0xa4, 0xed, - 0x86, 0x34, 0x2e, 0xe4, 0x4d, 0xbc, 0x7b, 0xf7, 0x39, 0x7a, 0xec, 0x23, 0xf8, 0xe7, 0x45, 0xdc, - 0xcc, 0x9a, 0x7a, 0xf2, 0x30, 0xcb, 0xcc, 0xf7, 0xfd, 0xd8, 0xfd, 0x66, 0xa1, 0x9f, 0xa9, 0x20, - 0x2f, 0x64, 0x29, 0xb9, 0xbd, 0x56, 0x69, 0x9a, 0xcf, 0x4f, 0xc7, 0x0b, 0xb9, 0x90, 0x24, 0x5d, - 0xd7, 0x9d, 0x71, 0xfd, 0x77, 0x06, 0xfd, 0x29, 0x56, 0x4f, 0xc9, 0xea, 0x05, 0xb9, 0x03, 0x56, - 0x86, 0x95, 0x60, 0x1e, 0xbb, 0x18, 0x46, 0x75, 0xcb, 0xcf, 0xe1, 0x38, 0x2d, 0x30, 0x29, 0x71, - 0x56, 0xa0, 0x5a, 0x6e, 0x97, 0x72, 0x23, 0xda, 0xda, 0xb5, 0xa2, 0x91, 0x91, 0xa3, 0x5f, 0x95, - 0x9f, 0xc1, 0x70, 0x2d, 0x9f, 0xff, 0x28, 0x8b, 0xa8, 0x81, 0xd6, 0x0e, 0x88, 0x80, 0x9e, 0xc2, - 0x82, 0xdc, 0x0e, 0xb9, 0xcd, 0xc8, 0xc7, 0xd0, 0x55, 0x75, 0x00, 0xd1, 0xa5, 0x97, 0xcd, 0x50, - 0xab, 0x2b, 0x4c, 0xb6, 0x28, 0x6c, 0xa2, 0xcd, 0xe0, 0xbf, 0x31, 0xe8, 0x86, 0x0a, 0x37, 0x25, - 0xbf, 0x82, 0x4e, 0x59, 0xe5, 0x48, 0x71, 0x47, 0x93, 0x93, 0xc0, 0xec, 0x19, 0x90, 0x69, 0xce, - 0x58, 0xdb, 0x11, 0x41, 0xdc, 0x83, 0x76, 0xa6, 0x28, 0xfb, 0x60, 0xe2, 0x34, 0x68, 0xb3, 0x78, - 0xa4, 0x3d, 0x7e, 0x09, 0xbd, 0x5c, 0xc7, 0x9f, 0x69, 0xcc, 0xfa, 0x07, 0xb3, 0x6b, 0x60, 0xaa, - 0x7c, 0x0f, 0x8e, 0x0e, 0xf7, 0xf3, 0x1e, 0x58, 0x0f, 0x8f, 0xb1, 0xd3, 0xe2, 0x00, 0xf6, 0x5d, - 0x78, 0x1f, 0xc6, 0xa1, 0xc3, 0x6e, 0xc5, 0xee, 0xd3, 0x6d, 0xed, 0x75, 0xed, 0xbe, 0x5c, 0xb6, - 0xd7, 0xf5, 0xa1, 0xeb, 0xf5, 0xdb, 0x6d, 0xcd, 0x6d, 0xfa, 0xf7, 0x9b, 0x9f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 
0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md new file mode 100644 index 00000000..1cade6ce --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go new file mode 100644 index 00000000..8f44fa15 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go @@ -0,0 +1,19 @@ +package md2man + +import ( + "github.com/russross/blackfriday" +) + +func Render(doc []byte) []byte { + renderer := RoffRenderer(0) + extensions := 0 + extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS + extensions |= blackfriday.EXTENSION_TABLES + extensions |= blackfriday.EXTENSION_FENCED_CODE + extensions |= blackfriday.EXTENSION_AUTOLINK + extensions |= blackfriday.EXTENSION_SPACE_HEADERS + extensions |= blackfriday.EXTENSION_FOOTNOTES + extensions |= blackfriday.EXTENSION_TITLEBLOCK + + return blackfriday.Markdown(doc, renderer, extensions) +} diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go new file mode 100644 index 00000000..4478786b --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go @@ -0,0 +1,269 @@ +package md2man + +import ( + "bytes" + "fmt" + "html" + "strings" + + "github.com/russross/blackfriday" +) + +type roffRenderer struct{} + +func RoffRenderer(flags int) blackfriday.Renderer { + return &roffRenderer{} +} + +func (r *roffRenderer) GetFlags() int { + return 0 +} + +func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { + out.WriteString(".TH ") + + splitText := bytes.Split(text, []byte("\n")) + for i, line := range splitText { + line = bytes.TrimPrefix(line, []byte("% ")) + if i == 0 { + line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) + line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) + } + line = append([]byte("\""), line...) + line = append(line, []byte("\" ")...) 
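+		// By this point each "% name(section) date" metadata line has been
+		// split and wrapped in double quotes, so roff sees one .TH argument
+		// per piece, roughly: % md2man(1) -> "md2man" "1".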
+		out.Write(line)
+	}
+
+	out.WriteString(" \"\"\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+	out.WriteString("\n.PP\n.RS\n\n.nf\n")
+	escapeSpecialChars(out, text)
+	out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+	out.WriteString("\n.PP\n.RS\n")
+	out.Write(text)
+	out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
+	out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+	marker := out.Len()
+
+	switch {
+	case marker == 0:
+		// This is the doc header
+		out.WriteString(".TH ")
+	case level == 1:
+		out.WriteString("\n\n.SH ")
+	case level == 2:
+		out.WriteString("\n.SH ")
+	default:
+		out.WriteString("\n.SS ")
+	}
+
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+	out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+	marker := out.Len()
+	out.WriteString(".IP ")
+	if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+		// numbered items use an auto-incremented roff register
+		out.WriteString("\\n+[step" + fmt.Sprint(flags) + "]")
+	} else {
+		// unordered items get a bullet
+		out.WriteString("\\(bu 2")
+	}
+	out.WriteString("\n")
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+	out.WriteString("\n\\item ")
+	out.Write(text)
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+	marker := out.Len()
+	out.WriteString("\n.PP\n")
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	if marker != 0 {
+		out.WriteString("\n")
+	}
+}
+
+// TODO: This might not work
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+	out.WriteString(".TS\nallbox;\n")
+
+	out.Write(header)
+	out.Write(body)
+	out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+	if out.Len() > 0 {
+		out.WriteString("\n")
+	}
+	out.Write(text)
+	out.WriteString("\n")
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+	if out.Len() > 0 {
+		out.WriteString(" ")
+	}
+	out.Write(text)
+	out.WriteString(" ")
+}
+
+// TODO: This is probably broken
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+	if out.Len() > 0 {
+		out.WriteString("\t")
+	}
+	out.Write(text)
+	out.WriteString("\t")
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+	out.WriteString("\n\\[la]")
+	out.Write(link)
+	out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\fB\\fC")
+	escapeSpecialChars(out, text)
+	out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\fB")
+	out.Write(text)
+	out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\fI")
+	out.Write(text)
+	out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+	out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
{ + r.AutoLink(out, link, 0) +} + +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { + out.Write(tag) +} + +func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\s+2") + out.Write(text) + out.WriteString("\\s-2") +} + +func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { +} + +func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + +} + +func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { + out.WriteString(html.UnescapeString(string(entity))) +} + +func processFooterText(text []byte) []byte { + text = bytes.TrimPrefix(text, []byte("% ")) + newText := []byte{} + textArr := strings.Split(string(text), ") ") + + for i, w := range textArr { + if i == 0 { + w = strings.Replace(w, "(", "\" \"", 1) + w = fmt.Sprintf("\"%s\"", w) + } else { + w = fmt.Sprintf(" \"%s\"", w) + } + newText = append(newText, []byte(w)...) + } + newText = append(newText, []byte(" \"\"")...) + + return newText +} + +func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { + escapeSpecialChars(out, text) +} + +func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { +} + +func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(out *bytes.Buffer, text []byte) { + for i := 0; i < len(text); i++ { + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + out.Write(text[org:i]) + } + + // escape a character + if i >= len(text) { + break + } + out.WriteByte('\\') + out.WriteByte(text[i]) + } +} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go index 74f446b6..b65ea6fa 100644 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ b/vendor/github.com/dustin/go-humanize/comma.go @@ -39,7 +39,7 @@ func Comma(v int64) string { // Commaf produces a string form of the given number in base 10 with // commas after every three orders of magnitude. // -// e.g. Commaf(834142.32) -> 834,142.32 +// e.g. Comma(834142.32) -> 834,142.32 func Commaf(v float64) string { buf := &bytes.Buffer{} if v < 0 { diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690de..00000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. 
-func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go index 9cce4e8d..fe86fe55 100644 --- a/vendor/github.com/dustin/go-humanize/si.go +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -41,7 +41,7 @@ func revfmap(in map[float64]string) map[string]float64 { var riParseRegex *regexp.Regexp func init() { - ri := `^([\-0-9.]+)\s?([` + ri := `^([0-9.]+)\s?([` for _, v := range siPrefixTable { ri += v } @@ -61,21 +61,18 @@ func ComputeSI(input float64) (float64, string) { if input == 0 { return 0, "" } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) + exponent := math.Floor(logn(input, 10)) exponent = math.Floor(exponent/3) * 3 - value := mag / math.Pow(10, exponent) + value := input / math.Pow(10, exponent) // Handle special case where value is exactly 1000.0 // Should return 1M instead of 1000k if value == 1000.0 { exponent += 3 - value = mag / math.Pow(10, exponent) + value = input / math.Pow(10, exponent) } - value = math.Copysign(value, input) - prefix := siPrefixTable[exponent] return value, prefix } diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go index 815f6307..592ebe1d 100644 --- a/vendor/github.com/dustin/go-humanize/times.go +++ b/vendor/github.com/dustin/go-humanize/times.go @@ -9,7 +9,9 @@ import ( // Seconds-based time units const ( - Day = 24 * time.Hour + Minute = 60 + Hour = 60 * Minute + Day = 24 * Hour Week = 7 * Day Month = 30 * Day Year = 12 * Month @@ -23,35 +25,18 @@ func Time(then time.Time) string { return RelTime(then, time.Now(), "ago", "from now") } -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. 
-type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, +var magnitudes = []struct { + d int64 + format string + divby int64 +}{ + {1, "now", 1}, + {2, "1 second %s", 1}, + {Minute, "%d seconds %s", 1}, + {2 * Minute, "1 minute %s", 1}, + {Hour, "%d minutes %s", Minute}, + {2 * Hour, "1 hour %s", 1}, + {Day, "%d hours %s", Hour}, {2 * Day, "1 day %s", 1}, {Week, "%d days %s", Day}, {2 * Week, "1 week %s", 1}, @@ -72,43 +57,35 @@ var defaultMagnitudes = []RelTimeMagnitude{ // // RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. -func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { lbl := albl - diff := b.Sub(a) + diff := b.Unix() - a.Unix() - if a.After(b) { + after := a.After(b) + if after { lbl = blbl - diff = a.Sub(b) + diff = a.Unix() - b.Unix() } n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D >= diff + return magnitudes[i].d > diff }) mag := magnitudes[n] args := []interface{}{} escaped := false - for _, ch := range mag.Format { + for _, ch := range mag.format { if escaped { switch ch { + case '%': case 's': args = append(args, lbl) case 'd': - args = append(args, diff/mag.DivBy) + args = append(args, diff/mag.divby) } escaped = false } else { escaped = ch == '%' } } - return fmt.Sprintf(mag.Format, args...) + return fmt.Sprintf(mag.format, args...) } diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go index 0bb015b4..c02beacb 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -7,7 +7,7 @@ import ( "reflect" "strconv" - yaml "github.com/cloudfoundry-incubator/candiedyaml" + "gopkg.in/yaml.v2" ) // Marshals the object into JSON then converts JSON to YAML and returns the @@ -15,12 +15,12 @@ import ( func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: ", err) } y, err := JSONToYAML(j) if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + return nil, fmt.Errorf("error converting JSON to YAML: ", err) } return y, nil @@ -48,7 +48,7 @@ func JSONToYAML(j []byte) ([]byte, error) { var jsonObj interface{} // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 + // etc.) when unmarshling to interface{}, it just picks float64 // universally. go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. 
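	// (YAML 1.2 is a superset of JSON, so yaml.Unmarshal can consume the
	// JSON bytes directly, decoding 3 as an int and 3.5 as a float64.)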
err := yaml.Unmarshal(j, &jsonObj) diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE index 7be0cc7b..335e38e1 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -1,7 +1,7 @@ -Protocol Buffers for Go with Gadgets +Extensions for Protocol Buffers to create more go like structures. -Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf +Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +http://github.com/gogo/protobuf/gogoproto Go support for Protocol Buffers - Google's data interchange format diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index 5ecfae11..f0424d4f 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -1,7 +1,7 @@ -// Protocol Buffers for Go with Gadgets +// Extensions for Protocol Buffers to create more go like structures. // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 6da0e3e7..f97c2338 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion1 var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumOptions)(nil), @@ -589,77 +587,75 @@ func init() { proto.RegisterExtension(E_Castvalue) } -func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } - var fileDescriptorGogo = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xc9, 0x6f, 0x1c, 0x45, - 0x14, 0x87, 0x85, 0x70, 0xe4, 0x99, 0xe7, 0x0d, 0x8f, 0x8d, 0x09, 0x11, 0x88, 0xe4, 0xc6, 0xc9, - 0x39, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0xa3, 0x20, 0x0c, 0x23, 0x13, 0x07, 0x10, 0x87, - 0x51, 0xcf, 0xb8, 0xdc, 0x19, 0xe8, 0xee, 0x6a, 0xba, 0xba, 0xa3, 0x38, 0x37, 0x14, 0x16, 0x21, - 0xc4, 0x8e, 0x04, 0x09, 0x09, 0xcb, 0x81, 0x7d, 0x0d, 0xcb, 0x9d, 0x0b, 0x70, 0xe6, 0x7f, 0xe0, - 0x02, 0x98, 0x4d, 0xf2, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x1e, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xbf, 0x69, 0x00, 0x5f, 0xf9, 0x6a, 0x31, 0x4e, - 0x54, 0xaa, 0x1a, 0x75, 0xbc, 0xce, 0x2f, 0x8f, 0x1c, 0xf5, 0x95, 0xf2, 0x03, 0x79, 0x3c, 0xff, - 0xab, 0x93, 0x6d, 0x1f, 0xdf, 0x92, 0xba, 0x9b, 0xf4, 0xe2, 0x54, 0x25, 0xc5, 0x62, 0xf1, 0x20, - 0xcc, 0xd1, 0xe2, 0xb6, 0x8c, 0xb2, 0xb0, 0x1d, 0x27, 0x72, 0xbb, 0x77, 0xa9, 0x71, 0xd7, 0x62, - 0x41, 0x2e, 0x32, 0xb9, 0xb8, 0x16, 0x65, 0xe1, 0x43, 0x71, 0xda, 0x53, 0x91, 0x3e, 0x7c, 0xf3, - 0xb7, 0x5b, 0x8f, 0xde, 0x72, 0x6f, 0x6d, 0x63, 0x96, 0x50, 0xfc, 0x5f, 0x2b, 0x07, 0xc5, 0x06, - 0xdc, 0x5e, 0xf1, 0xe9, 0x34, 0xe9, 0x45, 0xbe, 0x4c, 0x2c, 0xc6, 0x9f, 0xc8, 0x38, 0x67, 0x18, - 0x1f, 0x26, 0x54, 0xac, 0xc2, 0xd4, 0x28, 0xae, 0x9f, 0xc9, 0x35, 0x29, 0x4d, 0x49, 0x13, 0x66, - 0x72, 0x49, 0x37, 0xd3, 0xa9, 0x0a, 0x23, 0x2f, 0x94, 0x16, 0xcd, 0x2f, 0xb9, 0xa6, 0xbe, 0x31, - 0x8d, 0xd8, 0x6a, 0x49, 0x89, 0xf3, 0x30, 0x8f, 0x9f, 0x5c, 0xf4, 0x82, 0x4c, 0x9a, 0xb6, 0x63, - 0x43, 0x6d, 0xe7, 0x71, 0x19, 0x2b, 0x7f, 0xbd, 0x32, 0x96, 0x2b, 0xe7, 0x4a, 0x81, 0xe1, 0x35, - 0x3a, 0xe1, 0xcb, 0x34, 0x95, 0x89, 0x6e, 0x7b, 0x41, 0x30, 0x64, 0x93, 0x67, 0x7a, 0x41, 0x69, - 0xbc, 0xba, 0x5b, 0xed, 0x44, 0xb3, 0x20, 0x57, 0x82, 0x40, 0x6c, 0xc2, 0x1d, 0x43, 0x3a, 0xeb, - 0xe0, 0xbc, 0x46, 0xce, 0xf9, 0x03, 0xdd, 0x45, 0x6d, 0x0b, 0xf8, 0xf3, 0xb2, 0x1f, 0x0e, 0xce, - 0x77, 0xc8, 0xd9, 0x20, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x61, 0xf6, 0xa2, 0x4c, 0x3a, 0x4a, 0xcb, - 0xb6, 0x7c, 0x2a, 0xf3, 0x02, 0x07, 0xdd, 0x75, 0xd2, 0xcd, 0x10, 0xb8, 0x86, 0x1c, 0xba, 0x4e, - 0x42, 0x6d, 0xdb, 0xeb, 0x4a, 0x07, 0xc5, 0x0d, 0x52, 0x8c, 0xe3, 0x7a, 0x44, 0x57, 0x60, 0xd2, - 0x57, 0xc5, 0x2d, 0x39, 0xe0, 0xef, 0x12, 0x3e, 0xc1, 0x0c, 0x29, 0x62, 0x15, 0x67, 0x81, 0x97, - 0xba, 0xec, 0xe0, 0x3d, 0x56, 0x30, 0x43, 0x8a, 0x11, 0xca, 0xfa, 0x3e, 0x2b, 0xb4, 0x51, 0xcf, - 0x65, 0x98, 0x50, 0x51, 0xb0, 0xa3, 0x22, 0x97, 0x4d, 0x7c, 0x40, 0x06, 0x20, 0x04, 0x05, 0x4b, - 0x50, 0x77, 0x6d, 0xc4, 0x87, 0x84, 0xd7, 0x24, 0x77, 0xa0, 0x09, 0x33, 0x3c, 0x64, 0x7a, 0x2a, - 0x72, 0x50, 0x7c, 0x44, 0x8a, 0x69, 0x03, 0xa3, 0xdb, 0x48, 0xa5, 0x4e, 0x7d, 0xe9, 0x22, 0xf9, - 0x98, 0x6f, 0x83, 0x10, 0x2a, 0x65, 0x47, 0x46, 0xdd, 0x0b, 0x6e, 0x86, 0x4f, 0xb8, 0x94, 0xcc, - 0xa0, 0x62, 0x15, 0xa6, 0x42, 0x2f, 0xd1, 0x17, 0xbc, 0xc0, 0xa9, 0x1d, 0x9f, 0x92, 0x63, 0xb2, - 0x84, 0xa8, 0x22, 0x59, 0x34, 0x8a, 0xe6, 0x33, 0xae, 0x88, 0x81, 0xd1, 0xd1, 0xd3, 0xa9, 0xd7, - 0x09, 0x64, 0x7b, 0x14, 0xdb, 0xe7, 0x7c, 0xf4, 0x0a, 0x76, 
0xdd, 0x34, 0x2e, 0x41, 0x5d, 0xf7, - 0x2e, 0x3b, 0x69, 0xbe, 0xe0, 0x4e, 0xe7, 0x00, 0xc2, 0x8f, 0xc1, 0x9d, 0x43, 0x47, 0xbd, 0x83, - 0xec, 0x4b, 0x92, 0x2d, 0x0c, 0x19, 0xf7, 0x34, 0x12, 0x46, 0x55, 0x7e, 0xc5, 0x23, 0x41, 0x0e, - 0xb8, 0x5a, 0x30, 0x9f, 0x45, 0xda, 0xdb, 0x1e, 0xad, 0x6a, 0x5f, 0x73, 0xd5, 0x0a, 0xb6, 0x52, - 0xb5, 0x73, 0xb0, 0x40, 0xc6, 0xd1, 0xfa, 0xfa, 0x0d, 0x0f, 0xd6, 0x82, 0xde, 0xac, 0x76, 0xf7, - 0x71, 0x38, 0x52, 0x96, 0xf3, 0x52, 0x2a, 0x23, 0x8d, 0x4c, 0x3b, 0xf4, 0x62, 0x07, 0xf3, 0x4d, - 0x32, 0xf3, 0xc4, 0x5f, 0x2b, 0x05, 0xeb, 0x5e, 0x8c, 0xf2, 0x47, 0xe1, 0x30, 0xcb, 0xb3, 0x28, - 0x91, 0x5d, 0xe5, 0x47, 0xbd, 0xcb, 0x72, 0xcb, 0x41, 0xfd, 0xed, 0x40, 0xab, 0x36, 0x0d, 0x1c, - 0xcd, 0x67, 0xe1, 0xb6, 0xf2, 0xf7, 0x46, 0xbb, 0x17, 0xc6, 0x2a, 0x49, 0x2d, 0xc6, 0xef, 0xb8, - 0x53, 0x25, 0x77, 0x36, 0xc7, 0xc4, 0x1a, 0x4c, 0xe7, 0x7f, 0xba, 0x3e, 0x92, 0xdf, 0x93, 0x68, - 0xaa, 0x4f, 0xd1, 0xe0, 0xe8, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xf9, 0xf7, 0x03, 0x0f, 0x0e, 0x42, - 0x8a, 0xa7, 0x6f, 0x66, 0x20, 0x89, 0x1b, 0xf7, 0x1c, 0x90, 0xac, 0x4b, 0xad, 0x3d, 0xbf, 0xf4, - 0x3c, 0xbd, 0x47, 0x67, 0xb6, 0x1a, 0xc4, 0xe2, 0x01, 0x2c, 0x4f, 0x35, 0x2e, 0xed, 0xb2, 0x2b, - 0x7b, 0x65, 0x85, 0x2a, 0x69, 0x29, 0xce, 0xc0, 0x54, 0x25, 0x2a, 0xed, 0xaa, 0x67, 0x48, 0x35, - 0x69, 0x26, 0xa5, 0x38, 0x01, 0x63, 0x18, 0x7b, 0x76, 0xfc, 0x59, 0xc2, 0xf3, 0xe5, 0xe2, 0x14, - 0xd4, 0x38, 0xee, 0xec, 0xe8, 0x73, 0x84, 0x96, 0x08, 0xe2, 0x1c, 0x75, 0x76, 0xfc, 0x79, 0xc6, - 0x19, 0x41, 0xdc, 0xbd, 0x84, 0x3f, 0xbe, 0x38, 0x46, 0xe3, 0x8a, 0x6b, 0xb7, 0x04, 0xe3, 0x94, - 0x71, 0x76, 0xfa, 0x05, 0xfa, 0x72, 0x26, 0xc4, 0x7d, 0x70, 0xc8, 0xb1, 0xe0, 0x2f, 0x11, 0x5a, - 0xac, 0x17, 0xab, 0x30, 0x61, 0xe4, 0x9a, 0x1d, 0x7f, 0x99, 0x70, 0x93, 0xc2, 0xad, 0x53, 0xae, - 0xd9, 0x05, 0xaf, 0xf0, 0xd6, 0x89, 0xc0, 0xb2, 0x71, 0xa4, 0xd9, 0xe9, 0x57, 0xb9, 0xea, 0x8c, - 0x88, 0x65, 0xa8, 0x97, 0x63, 0xca, 0xce, 0xbf, 0x46, 0x7c, 0x9f, 0xc1, 0x0a, 0x18, 0x63, 0xd2, - 0xae, 0x78, 0x9d, 0x2b, 0x60, 0x50, 0x78, 0x8c, 0x06, 0xa3, 0xcf, 0x6e, 0x7a, 0x83, 0x8f, 0xd1, - 0x40, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc9, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0x06, - 0xb3, 0xc4, 0xee, 0x78, 0x8b, 0xb7, 0x31, 0x10, 0x25, 0xa2, 0x05, 0x8d, 0x83, 0x39, 0x62, 0xf7, - 0xbd, 0x4d, 0xbe, 0xd9, 0x03, 0x31, 0x22, 0x1e, 0x81, 0x85, 0xe1, 0x19, 0x62, 0xb7, 0x5e, 0xdd, - 0x1b, 0xf8, 0xd5, 0x6f, 0x46, 0x88, 0x38, 0xd7, 0xff, 0xd5, 0x6f, 0xe6, 0x87, 0x5d, 0x7b, 0x6d, - 0xaf, 0xfa, 0x62, 0x67, 0xc6, 0x87, 0x58, 0x01, 0xe8, 0x8f, 0x6e, 0xbb, 0xeb, 0x3a, 0xb9, 0x0c, - 0x08, 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x83, 0x8f, 0x06, 0x11, 0x62, 0x09, 0x6a, 0x51, 0x16, - 0x04, 0xf8, 0x70, 0x34, 0xee, 0x1e, 0x12, 0x13, 0x32, 0xd8, 0x62, 0xf6, 0xf7, 0x7d, 0x3a, 0x18, - 0x0c, 0x88, 0x13, 0x70, 0x48, 0x86, 0x1d, 0xb9, 0x65, 0x23, 0xff, 0xd8, 0xe7, 0x81, 0x80, 0xab, - 0xc5, 0x32, 0x40, 0xf1, 0xd2, 0x98, 0xee, 0xc4, 0xd6, 0x6f, 0xfd, 0x73, 0xbf, 0x78, 0x07, 0x35, - 0x90, 0xbe, 0x20, 0x7f, 0xeb, 0xb4, 0x08, 0x76, 0xab, 0x82, 0xfc, 0x45, 0xf3, 0x24, 0x8c, 0x3f, - 0xa1, 0x55, 0x94, 0x7a, 0xbe, 0x8d, 0xfe, 0x8b, 0x68, 0x5e, 0x8f, 0x05, 0x0b, 0x55, 0x22, 0x53, - 0xcf, 0xd7, 0x36, 0xf6, 0x6f, 0x62, 0x4b, 0x00, 0xe1, 0xae, 0xa7, 0x53, 0x97, 0xfb, 0xfe, 0x87, - 0x61, 0x06, 0x70, 0xd3, 0x78, 0xfd, 0xa4, 0xdc, 0xb1, 0xb1, 0xff, 0xf2, 0xa6, 0x69, 0xbd, 0x38, - 0x05, 0x75, 0xbc, 0xcc, 0xdf, 0xb7, 0x6d, 0xf0, 0x7f, 0x04, 0xf7, 0x89, 0xd3, 0xc7, 0x60, 0xae, - 0xab, 0xc2, 0x41, 0xec, 0x34, 0x34, 0x55, 0x53, 0xb5, 0xf2, 0x07, 0xf1, 0xff, 0x00, 
0x00, 0x00, - 0xff, 0xff, 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, + // 1096 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xcb, 0x6f, 0xdc, 0x54, + 0x14, 0x87, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55, + 0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46, + 0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78, + 0x23, 0x41, 0x4b, 0xcb, 0x63, 0xc1, 0xfb, 0x59, 0x1e, 0x7b, 0x36, 0xc0, 0x9a, 0xff, 0x81, 0x0d, + 0x10, 0x5e, 0x52, 0x76, 0xd9, 0xf4, 0x1e, 0xfb, 0x1c, 0xcf, 0xb5, 0x67, 0xa4, 0x7b, 0x67, 0xe7, + 0x64, 0xee, 0xf7, 0xcd, 0xf5, 0x39, 0xbe, 0xe7, 0x37, 0x06, 0x08, 0x64, 0x20, 0x97, 0xe3, 0x44, + 0xa6, 0xb2, 0xd5, 0xc4, 0xeb, 0xfc, 0xf2, 0xd0, 0xe1, 0x40, 0xca, 0x20, 0x14, 0x47, 0xf3, 0xbf, + 0x3a, 0xd9, 0xf6, 0xd1, 0x2d, 0xa1, 0xba, 0x49, 0x2f, 0x4e, 0x65, 0x52, 0x2c, 0xf6, 0x1e, 0x80, + 0x05, 0x5a, 0xdc, 0x16, 0x51, 0xd6, 0x6f, 0xc7, 0x89, 0xd8, 0xee, 0x5d, 0x68, 0xdd, 0xb6, 0x5c, + 0x90, 0xcb, 0x4c, 0x2e, 0xaf, 0xe8, 0x4f, 0x1f, 0x8c, 0xd3, 0x9e, 0x8c, 0xd4, 0xc1, 0x6b, 0xbf, + 0xdf, 0x78, 0xf8, 0x86, 0x3b, 0x1b, 0x1b, 0xf3, 0x84, 0xe2, 0x67, 0xeb, 0x39, 0xe8, 0x6d, 0xc0, + 0xcd, 0x15, 0x9f, 0x4a, 0x93, 0x5e, 0x14, 0x88, 0xc4, 0x62, 0xfc, 0x99, 0x8c, 0x0b, 0x86, 0xf1, + 0x21, 0x42, 0xbd, 0x53, 0x30, 0x33, 0x8e, 0xeb, 0x17, 0x72, 0x4d, 0x0b, 0x53, 0xb2, 0x0a, 0x73, + 0xb9, 0xa4, 0x9b, 0xa9, 0x54, 0xf6, 0x23, 0xbf, 0x2f, 0x2c, 0x9a, 0x5f, 0x73, 0x4d, 0x73, 0x63, + 0x16, 0xb1, 0x53, 0x25, 0xe5, 0x9d, 0x85, 0x45, 0xfc, 0xcf, 0x79, 0x3f, 0xcc, 0x84, 0x69, 0x3b, + 0x32, 0xd2, 0x76, 0x16, 0x97, 0xb1, 0xf2, 0xb7, 0x8b, 0x13, 0xb9, 0x72, 0xa1, 0x14, 0x18, 0x5e, + 0xa3, 0x13, 0x81, 0x48, 0x53, 0x91, 0xa8, 0xb6, 0x1f, 0x86, 0x23, 0x36, 0x79, 0xba, 0x17, 0x96, + 0xc6, 0x4b, 0xbb, 0xd5, 0x4e, 0xac, 0x16, 0xe4, 0x89, 0x30, 0xf4, 0x36, 0xe1, 0x96, 0x11, 0x9d, + 0x75, 0x70, 0x5e, 0x26, 0xe7, 0xe2, 0x50, 0x77, 0x51, 0xbb, 0x0e, 0xfc, 0xff, 0xb2, 0x1f, 0x0e, + 0xce, 0x77, 0xc9, 0xd9, 0x22, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x60, 0xfe, 0xbc, 0x48, 0x3a, 0x52, + 0x89, 0xb6, 0x78, 0x2a, 0xf3, 0x43, 0x07, 0xdd, 0x15, 0xd2, 0xcd, 0x11, 0xb8, 0x82, 0x1c, 0xba, + 0x8e, 0x41, 0x63, 0xdb, 0xef, 0x0a, 0x07, 0xc5, 0x55, 0x52, 0x4c, 0xe2, 0x7a, 0x44, 0x4f, 0xc0, + 0x74, 0x20, 0x8b, 0x5b, 0x72, 0xc0, 0xdf, 0x23, 0x7c, 0x8a, 0x19, 0x52, 0xc4, 0x32, 0xce, 0x42, + 0x3f, 0x75, 0xd9, 0xc1, 0xfb, 0xac, 0x60, 0x86, 0x14, 0x63, 0x94, 0xf5, 0x03, 0x56, 0x28, 0xa3, + 0x9e, 0xf7, 0xc2, 0x94, 0x8c, 0xc2, 0x1d, 0x19, 0xb9, 0x6c, 0xe2, 0x43, 0x32, 0x00, 0x21, 0x28, + 0x38, 0x0e, 0x4d, 0xd7, 0x46, 0x7c, 0x44, 0x78, 0x43, 0x70, 0x07, 0xf4, 0x39, 0xe3, 0x21, 0xa3, + 0x57, 0x38, 0x28, 0x3e, 0x26, 0xc5, 0xac, 0x81, 0xd1, 0x6d, 0xa4, 0x42, 0xa5, 0x81, 0x70, 0x91, + 0x7c, 0xc2, 0xb7, 0x41, 0x08, 0x95, 0xb2, 0x23, 0xa2, 0xee, 0x39, 0x37, 0xc3, 0xa7, 0x5c, 0x4a, + 0x66, 0x50, 0xa1, 0x27, 0x4f, 0xdf, 0x4f, 0xd4, 0x39, 0x3f, 0x74, 0x6a, 0xc7, 0x67, 0xe4, 0x98, + 0x2e, 0x21, 0xaa, 0x48, 0x16, 0x8d, 0xa3, 0xf9, 0x9c, 0x2b, 0x62, 0x60, 0x74, 0xf4, 0x54, 0xea, + 0x77, 0x42, 0xd1, 0x1e, 0xc7, 0xf6, 0x05, 0x1f, 0xbd, 0x82, 0x5d, 0x33, 0x8d, 0xba, 0xd3, 0xaa, + 0xf7, 0xb4, 0x93, 0xe6, 0x4b, 0xee, 0x74, 0x0e, 0x20, 0xfc, 0x18, 0xdc, 0x3a, 0x72, 0xd4, 0x3b, + 0xc8, 0xbe, 0x22, 0xd9, 0xd2, 0x88, 0x71, 0x4f, 0x23, 0x61, 0x5c, 0xe5, 0xd7, 0x3c, 0x12, 0x44, + 0xcd, 0xa5, 0xab, 0x96, 0x45, 0xca, 0xdf, 0x1e, 0xaf, 0x6a, 0xdf, 0x70, 0xd5, 0x0a, 0xb6, 0x52, + 
0xb5, 0x87, 0x61, 0x89, 0x8c, 0xe3, 0xf5, 0xf5, 0x5b, 0x1e, 0xac, 0x05, 0xbd, 0x59, 0xed, 0xee, + 0xe3, 0x70, 0xa8, 0x2c, 0xe7, 0x85, 0x54, 0x44, 0x0a, 0x19, 0xbd, 0xe7, 0xd8, 0xc1, 0x7c, 0x8d, + 0xcc, 0x3c, 0xf1, 0x57, 0x4a, 0xc1, 0x9a, 0x1f, 0xa3, 0xfc, 0x51, 0x38, 0xc8, 0xf2, 0x2c, 0x4a, + 0x44, 0x57, 0x06, 0x91, 0x6e, 0xe3, 0x96, 0x83, 0xfa, 0xbb, 0x5a, 0xab, 0x36, 0x0d, 0x1c, 0xcd, + 0x67, 0xe0, 0xa6, 0xf2, 0xf7, 0x46, 0xbb, 0xd7, 0x8f, 0x65, 0x92, 0x5a, 0x8c, 0xdf, 0x73, 0xa7, + 0x4a, 0xee, 0x4c, 0x8e, 0x79, 0x2b, 0x30, 0x9b, 0xff, 0xe9, 0xfa, 0x48, 0xfe, 0x40, 0xa2, 0x99, + 0x01, 0x45, 0x83, 0xa3, 0x2b, 0xfb, 0xb1, 0x9f, 0xb8, 0xcc, 0xbf, 0x1f, 0x79, 0x70, 0x10, 0x52, + 0x3c, 0x7d, 0x73, 0xb5, 0x24, 0x6e, 0xdd, 0x31, 0x24, 0x59, 0x13, 0x4a, 0xf9, 0x41, 0xe9, 0x79, + 0x66, 0x8f, 0xce, 0x6c, 0x35, 0x88, 0xbd, 0xfb, 0xb1, 0x3c, 0xd5, 0xb8, 0xb4, 0xcb, 0x2e, 0xee, + 0x95, 0x15, 0xaa, 0xa4, 0xa5, 0x77, 0x1a, 0x66, 0x2a, 0x51, 0x69, 0x57, 0x3d, 0x4b, 0xaa, 0x69, + 0x33, 0x29, 0xbd, 0xbb, 0x60, 0x02, 0x63, 0xcf, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc, 0xbb, 0x07, + 0x1a, 0x1c, 0x77, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x8e, 0x3a, 0x3b, 0xfe, 0x02, 0xe3, + 0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x71, 0xc5, 0xb5, 0x3b, 0x0e, 0x93, 0x94, + 0x71, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f, 0x26, 0xb4, + 0x58, 0xaf, 0x13, 0x64, 0xca, 0xc8, 0x35, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b, 0xa7, 0x5c, + 0xb3, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x48, 0xb3, 0xd3, 0xaf, 0x71, 0xd5, 0x19, + 0xd1, 0xa7, 0xa9, 0x59, 0x8e, 0x29, 0x3b, 0xff, 0x3a, 0xf1, 0x03, 0x06, 0x2b, 0x60, 0x8c, 0x49, + 0xbb, 0xe2, 0x0d, 0xae, 0x80, 0x41, 0xe1, 0x31, 0xaa, 0x47, 0x9f, 0xdd, 0xf4, 0x26, 0x1f, 0xa3, + 0x5a, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc5, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0xea, + 0x59, 0x62, 0x77, 0xbc, 0xcd, 0xdb, 0xa8, 0x45, 0x89, 0x4e, 0xa6, 0xd6, 0x70, 0x8e, 0xd8, 0x7d, + 0xef, 0x90, 0x6f, 0x7e, 0x28, 0x46, 0xbc, 0x47, 0x60, 0x69, 0x74, 0x86, 0xd8, 0xad, 0x97, 0xf6, + 0x6a, 0xbf, 0xfa, 0xcd, 0x08, 0xd1, 0x91, 0xb7, 0x38, 0x2a, 0x3f, 0xec, 0xda, 0xcb, 0x7b, 0xd5, + 0x17, 0x3b, 0x33, 0x3e, 0xf4, 0x2f, 0x34, 0x18, 0x8c, 0x6e, 0xbb, 0xeb, 0x0a, 0xb9, 0x0c, 0x08, + 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x95, 0x8f, 0x06, 0x11, 0x1a, 0x6e, 0x44, 0x59, 0x18, 0xe2, + 0xc3, 0xd1, 0xba, 0x7d, 0x44, 0x4c, 0x88, 0x70, 0x8b, 0xd9, 0x3f, 0xf6, 0xe9, 0x60, 0x30, 0xa0, + 0x67, 0xe8, 0x01, 0xd1, 0xef, 0xe8, 0x1a, 0x58, 0xc8, 0x3f, 0xf7, 0x79, 0x20, 0xe0, 0x6a, 0x7d, + 0x9e, 0xa0, 0x78, 0x69, 0x4c, 0x77, 0x62, 0xeb, 0xb7, 0xfe, 0xb5, 0x5f, 0xbc, 0x83, 0x1a, 0xc8, + 0x40, 0x90, 0xbf, 0x75, 0x5a, 0x04, 0xbb, 0x55, 0x41, 0xfe, 0xa2, 0x79, 0x0c, 0x26, 0x9f, 0x50, + 0x32, 0x4a, 0xfd, 0xc0, 0x46, 0xff, 0x4d, 0x34, 0xaf, 0xc7, 0x82, 0xf5, 0x65, 0x22, 0xf4, 0xa5, + 0xb2, 0xb1, 0xff, 0x10, 0x5b, 0x02, 0x08, 0x77, 0x7d, 0x95, 0xba, 0xdc, 0xf7, 0xbf, 0x0c, 0x33, + 0x80, 0x9b, 0xc6, 0xeb, 0x27, 0xc5, 0x8e, 0x8d, 0xfd, 0x8f, 0x37, 0x4d, 0xeb, 0xf5, 0x00, 0x6c, + 0xe2, 0x65, 0xfe, 0xbe, 0x6d, 0x83, 0xff, 0x27, 0x78, 0x40, 0x9c, 0x3c, 0x02, 0x0b, 0xfa, 0x79, + 0xa9, 0x63, 0x27, 0x61, 0x55, 0xae, 0xca, 0xf5, 0xfc, 0x41, 0xbc, 0x1e, 0x00, 0x00, 0xff, 0xff, + 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 670021fe..8c29dbc0 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ 
-1,6 +1,4 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go index 5d4cba4b..79edb861 100644 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -84,20 +84,14 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + if emIn, ok := in.Addr().Interface().(extensionsMap); ok { + emOut := out.Addr().Interface().(extensionsMap) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } else if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { emOut := out.Addr().Interface().(extensionsBytes) bIn := emIn.GetExtensions() bOut := emOut.GetExtensions() *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } } uf := in.FieldByName("XXX_unrecognized") diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 0d6634cc..7b06266d 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -378,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group wire := int(u & 0x7) if wire == WireEndGroup { if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) @@ -395,20 +390,16 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group if !ok { // Maybe it's an extension? if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + if ee, eok := e.(extensionsMap); eok { + ext := ee.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + ee.ExtensionMap()[int32(tag)] = ext + } else if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() *ext = append(*ext, o.buf[oi:o.index]...) } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) 
- extmap[int32(tag)] = ext } continue } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go index ecc63873..603dabec 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -1,7 +1,5 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8c1b8fd1..eb7e0474 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -70,10 +70,6 @@ var ( // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") ) // The fundamental encoders that put bytes on the wire. @@ -82,10 +78,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -285,9 +277,6 @@ func (p *Buffer) Marshal(pb Message) error { stats.Encode++ } - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } return err } @@ -1073,25 +1062,10 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) { // Encode an extension map. func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { return err } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { - return err - } - v, _ := exts.extensionsRead() - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { // Fast-path for common cases: zero or one extensions. if len(v) <= 1 { for _, e := range v { @@ -1114,13 +1088,8 @@ func (o *Buffer) enc_map_body(v map[int32]Extension) error { } func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) } // Encode a map field. 
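An aside for the map hunks below: protobuf encodes each map entry as a small length-delimited message, with the key in field 1 and the value in field 2. A minimal, self-contained sketch of that wire layout (helper names here are illustrative only, not part of this patch or the proto package):

	package main

	import "fmt"

	// appendVarint writes x in protobuf varint form.
	func appendVarint(b []byte, x uint64) []byte {
		for x >= 0x80 {
			b = append(b, byte(x)|0x80)
			x >>= 7
		}
		return append(b, byte(x))
	}

	// encodeMapEntry lays out one map<int32,string> entry: an outer
	// length-delimited field wrapping field 1 (key) and field 2 (value).
	func encodeMapEntry(fieldNum int, key int32, val string) []byte {
		var entry []byte
		entry = appendVarint(entry, 1<<3|0) // key: field 1, varint
		entry = appendVarint(entry, uint64(key))
		entry = appendVarint(entry, 2<<3|2) // value: field 2, length-delimited
		entry = appendVarint(entry, uint64(len(val)))
		entry = append(entry, val...)

		var out []byte
		out = appendVarint(out, uint64(fieldNum)<<3|2)
		out = appendVarint(out, uint64(len(entry)))
		return append(out, entry...)
	}

	func main() {
		fmt.Printf("%% x\n", encodeMapEntry(4, 7, "hi")) // 22 06 08 07 12 02 68 69
	}

This per-entry layout is essentially what enc_new_map in the next hunk produces, reusing its keycopy/valcopy scratch values for each entry.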
@@ -1149,7 +1118,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { return err } return nil @@ -1159,6 +1128,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { for _, key := range v.MapKeys() { val := v.MapIndex(key) + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + keycopy.Set(key) valcopy.Set(val) @@ -1246,9 +1220,6 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { return err } } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } } } @@ -1265,9 +1236,6 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } if len(v) > 0 { o.buf = append(o.buf, v...) } diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 66e7e163..f77cfb1e 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -1,7 +1,7 @@ -// Protocol Buffers for Go with Gadgets +// Extensions for Protocol Buffers to create more go like structures. // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto // // Go support for Protocol Buffers - Google's data interchange format // diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go index 8b16f951..f5db1def 100644 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -121,16 +121,9 @@ func equalStruct(v1, v2 reflect.Value) bool { } } - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { return false } } @@ -191,13 +184,6 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool { } return true case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } return equalAny(v1.Elem(), v2.Elem(), prop) case reflect.Slice: if v1.Type().Elem().Kind() == reflect.Uint8 { @@ -237,14 +223,8 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool { } // base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. 
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { if len(em1) != len(em2) { return false } diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index f7384baa..6180347e 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -52,112 +52,23 @@ type ExtensionRange struct { Start, End int32 // both inclusive } -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. +// extendableProto is an interface implemented by any protocol buffer that may be extended. type extendableProto interface { Message ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) } -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange +type extensionsMap interface { + extendableProto ExtensionMap() map[int32]Extension } type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange + extendableProto GetExtensions() *[]byte } -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -type extensionRange interface { - Message - ExtensionRangeArray() []ExtensionRange -} - var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() -var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() -var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. @@ -190,23 +101,20 @@ type Extension struct { } // SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { +func SetRawExtension(base extendableProto, id int32, b []byte) { + if ebase, ok := base.(extensionsMap); ok { + ebase.ExtensionMap()[id] = Extension{enc: b} + } else if ebase, ok := base.(extensionsBytes); ok { clearExtension(base, id) ext := ebase.GetExtensions() *ext = append(*ext, b...) - return + } else { + panic("unreachable") } - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} } // isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extensionRange, field int32) bool { +func isExtensionField(pb extendableProto, field int32) bool { for _, er := range pb.ExtensionRangeArray() { if er.Start <= field && field <= er.End { return true @@ -217,12 +125,8 @@ func isExtensionField(pb extensionRange, field int32) bool { // checkExtensionTypes checks that the given extension is valid for pb. func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) } // Check the range. @@ -268,57 +172,43 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. 
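+// The map is updated in place: every entry that still has a decoded value
+// is re-encoded through encodeExtension and stored back under its key.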
+func encodeExtensionMap(m map[int32]Extension) error { for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { + err := encodeExtension(&e) + if err != nil { return err } - e.enc = p.buf m[k] = e } return nil } -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil } -func extensionsMapSize(m map[int32]Extension) (n int) { +func sizeExtensionMap(m map[int32]Extension) (n int) { for _, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. @@ -343,8 +233,12 @@ func extensionsMapSize(m map[int32]Extension) (n int) { } // HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + if epb, doki := pb.(extensionsMap); doki { + _, ok := epb.ExtensionMap()[extension.Field] + return ok + } else if epb, doki := pb.(extensionsBytes); doki { ext := epb.GetExtensions() buf := *ext o := 0 @@ -364,19 +258,7 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { } return false } - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok + panic("unreachable") } func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { @@ -399,32 +281,64 @@ func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { return -1 } -// ClearExtension removes the given extension from pb. 
-func ClearExtension(pb Message, extension *ExtensionDesc) {
-	clearExtension(pb, extension.Field)
-}
-
-func clearExtension(pb Message, fieldNum int32) {
-	if epb, doki := pb.(extensionsBytes); doki {
+func clearExtension(pb extendableProto, fieldNum int32) {
+	if epb, doki := pb.(extensionsMap); doki {
+		delete(epb.ExtensionMap(), fieldNum)
+	} else if epb, doki := pb.(extensionsBytes); doki {
 		offset := 0
 		for offset != -1 {
 			offset = deleteExtension(epb, fieldNum, offset)
 		}
-		return
-	}
-	epb, ok := extendable(pb)
-	if !ok {
-		return
+	} else {
+		panic("unreachable")
 	}
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
 	// TODO: Check types, field numbers, etc.?
-	extmap := epb.extensionsWrite()
-	delete(extmap, fieldNum)
+	clearExtension(pb, extension.Field)
 }

 // GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
-	if epb, doki := pb.(extensionsBytes); doki {
+// If the extension is not present it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+	if err := checkExtensionTypes(pb, extension); err != nil {
+		return nil, err
+	}
+
+	if epb, doki := pb.(extensionsMap); doki {
+		emap := epb.ExtensionMap()
+		e, ok := emap[extension.Field]
+		if !ok {
+			// defaultExtensionValue returns the default value or
+			// ErrMissingExtension if there is no default.
+			return defaultExtensionValue(extension)
+		}
+		if e.value != nil {
+			// Already decoded. Check the descriptor, though.
+			if e.desc != extension {
+				// This shouldn't happen. If it does, it means that
+				// GetExtension was called twice with two different
+				// descriptors with the same field number.
+				return nil, errors.New("proto: descriptor conflict")
+			}
+			return e.value, nil
+		}
+
+		v, err := decodeExtension(e.enc, extension)
+		if err != nil {
+			return nil, err
+		}
+
+		// Remember the decoded version and drop the encoded version.
+		// That way it is safe to mutate what we return.
+		e.value = v
+		e.desc = extension
+		e.enc = nil
+		emap[extension.Field] = e
+		return e.value, nil
+	} else if epb, doki := pb.(extensionsBytes); doki {
 		ext := epb.GetExtensions()
 		o := 0
 		for o < len(*ext) {
@@ -446,50 +360,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
 		}
 		return defaultExtensionValue(extension)
 	}
-	epb, ok := extendable(pb)
-	if !ok {
-		return nil, errors.New("proto: not an extendable proto")
-	}
-	if err := checkExtensionTypes(epb, extension); err != nil {
-		return nil, err
-	}
-
-	emap, mu := epb.extensionsRead()
-	if emap == nil {
-		return defaultExtensionValue(extension)
-	}
-	mu.Lock()
-	defer mu.Unlock()
-	e, ok := emap[extension.Field]
-	if !ok {
-		// defaultExtensionValue returns the default value or
-		// ErrMissingExtension if there is no default.
-		return defaultExtensionValue(extension)
-	}
-
-	if e.value != nil {
-		// Already decoded. Check the descriptor, though.
-		if e.desc != extension {
-			// This shouldn't happen. If it does, it means that
-			// GetExtension was called twice with two different
-			// descriptors with the same field number.
-			return nil, errors.New("proto: descriptor conflict")
-		}
-		return e.value, nil
-	}
-
-	v, err := decodeExtension(e.enc, extension)
-	if err != nil {
-		return nil, err
-	}
-
-	// Remember the decoded version and drop the encoded version.
-	// That way it is safe to mutate what we return.
-	e.value = v
-	e.desc = extension
-	e.enc = nil
-	emap[extension.Field] = e
-	return e.value, nil
+	panic("unreachable")
 }

 // defaultExtensionValue returns the default value for extension.
@@ -563,9 +434,14 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
 // The returned slice has the same length as es; missing extensions will appear as nil elements.
 func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, ok := pb.(extendableProto)
+	if !ok {
+		err = errors.New("proto: not an extendable proto")
+		return
+	}
 	extensions = make([]interface{}, len(es))
 	for i, e := range es {
-		extensions[i], err = GetExtension(pb, e)
+		extensions[i], err = GetExtension(epb, e)
 		if err == ErrMissingExtension {
 			err = nil
 		}
@@ -576,55 +452,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
 	return
 }

-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
-	epb, ok := extendable(pb)
-	if !ok {
-		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
-	}
-	registeredExtensions := RegisteredExtensions(pb)
-
-	emap, mu := epb.extensionsRead()
-	mu.Lock()
-	defer mu.Unlock()
-	extensions := make([]*ExtensionDesc, 0, len(emap))
-	for extid, e := range emap {
-		desc := e.desc
-		if desc == nil {
-			desc = registeredExtensions[extid]
-			if desc == nil {
-				desc = &ExtensionDesc{Field: extid}
-			}
-		}
-
-		extensions = append(extensions, desc)
-	}
-	return extensions, nil
-}
-
 // SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
-	if epb, doki := pb.(extensionsBytes); doki {
-		ClearExtension(pb, extension)
-		ext := epb.GetExtensions()
-		et := reflect.TypeOf(extension.ExtensionType)
-		props := extensionProperties(extension)
-		p := NewBuffer(nil)
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(value))
-		if err := props.enc(p, props, toStructPointer(x)); err != nil {
-			return err
-		}
-		*ext = append(*ext, p.buf...)
-		return nil
-	}
-	epb, ok := extendable(pb)
-	if !ok {
-		return errors.New("proto: not an extendable proto")
-	}
-	if err := checkExtensionTypes(epb, extension); err != nil {
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+	if err := checkExtensionTypes(pb, extension); err != nil {
 		return err
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
@@ -639,27 +469,26 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
 	if reflect.ValueOf(value).IsNil() {
 		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
 	}
-
-	extmap := epb.extensionsWrite()
-	extmap[extension.Field] = Extension{desc: extension, value: value}
-	return nil
+	return setExtension(pb, extension, value)
 }

-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
-	if epb, doki := pb.(extensionsBytes); doki {
+func setExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+	if epb, doki := pb.(extensionsMap); doki {
+		epb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+	} else if epb, doki := pb.(extensionsBytes); doki {
+		ClearExtension(pb, extension)
 		ext := epb.GetExtensions()
-		*ext = []byte{}
-		return
-	}
-	epb, ok := extendable(pb)
-	if !ok {
-		return
-	}
-	m := epb.extensionsWrite()
-	for k := range m {
-		delete(m, k)
+		et := reflect.TypeOf(extension.ExtensionType)
+		props := extensionProperties(extension)
+		p := NewBuffer(nil)
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(value))
+		if err := props.enc(p, props, toStructPointer(x)); err != nil {
+			return err
+		}
+		*ext = append(*ext, p.buf...)
 	}
+	return nil
 }

 // A global registry of extensions.
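Note: the hunk above re-keys the whole extensions API on the extendableProto interface (ExtensionRangeArray plus ExtensionMap). For context, a minimal, self-contained sketch of the round trip these entry points support; the generated package, message type, and extension descriptor names are hypothetical, not part of this patch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "example.com/hypothetical/testpb" // hypothetical generated package
)

func main() {
	// pb.MyMessage is assumed to be generated with an extension range,
	// so it satisfies extendableProto.
	m := &pb.MyMessage{}
	if err := proto.SetExtension(m, pb.E_Greeting, proto.String("hello")); err != nil {
		panic(err)
	}
	v, err := proto.GetExtension(m, pb.E_Greeting)
	if err != nil {
		panic(err)
	}
	fmt.Println(*v.(*string)) // "hello"
}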
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
index ea6478f0..86b1fa23 100644
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -35,10 +33,9 @@ import (
 	"reflect"
 	"sort"
 	"strings"
-	"sync"
 )

-func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+func GetBoolExtension(pb extendableProto, extension *ExtensionDesc, ifnotset bool) bool {
 	if reflect.ValueOf(pb).IsNil() {
 		return ifnotset
 	}
@@ -63,12 +60,8 @@ func (this *Extension) Compare(that *Extension) int {
 	return bytes.Compare(this.enc, that.enc)
 }

-func SizeOfInternalExtension(m extendableProto) (n int) {
-	return SizeOfExtensionMap(m.extensionsWrite())
-}
-
 func SizeOfExtensionMap(m map[int32]Extension) (n int) {
-	return extensionsMapSize(m)
+	return sizeExtensionMap(m)
 }

 type sortableMapElem struct {
@@ -101,10 +94,6 @@ func (this sortableExtensions) String() string {
 	return "map[" + strings.Join(ss, ",") + "]"
 }

-func StringFromInternalExtension(m extendableProto) string {
-	return StringFromExtensionsMap(m.extensionsWrite())
-}
-
 func StringFromExtensionsMap(m map[int32]Extension) string {
 	return newSortableExtensionsFromMap(m).String()
 }
@@ -117,12 +106,8 @@ func StringFromExtensionsBytes(ext []byte) string {
 	return StringFromExtensionsMap(m)
 }

-func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
-	return EncodeExtensionMap(m.extensionsWrite(), data)
-}
-
 func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
-	if err := encodeExtensionsMap(m); err != nil {
+	if err := encodeExtensionMap(m); err != nil {
 		return 0, err
 	}
 	keys := make([]int, 0, len(m))
@@ -140,7 +125,7 @@ func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
 	if m[id].value == nil || m[id].desc == nil {
 		return m[id].enc, nil
 	}
-	if err := encodeExtensionsMap(m); err != nil {
+	if err := encodeExtensionMap(m); err != nil {
 		return nil, err
 	}
 	return m[id].enc, nil
@@ -204,42 +189,15 @@ func NewExtension(e []byte) Extension {
 	return ee
 }

-func AppendExtension(e Message, tag int32, buf []byte) {
-	if ee, eok := e.(extensionsBytes); eok {
+func AppendExtension(e extendableProto, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsMap); eok {
+		ext := ee.ExtensionMap()[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		ee.ExtensionMap()[int32(tag)] = ext
+	} else if ee, eok := e.(extensionsBytes); eok {
 		ext := ee.GetExtensions()
 		*ext = append(*ext, buf...)
-		return
 	}
-	if ee, eok := e.(extendableProto); eok {
-		m := ee.extensionsWrite()
-		ext := m[int32(tag)] // may be missing
-		ext.enc = append(ext.enc, buf...)
-		m[int32(tag)] = ext
-	}
-}
-
-func encodeExtension(e *Extension) error {
-	if e.value == nil || e.desc == nil {
-		// Extension is only in its encoded form.
-		return nil
-	}
-	// We don't skip extensions that have an encoded form set,
-	// because the extension value may have been mutated after
-	// the last time this function was called.
-
-	et := reflect.TypeOf(e.desc.ExtensionType)
-	props := extensionProperties(e.desc)
-
-	p := NewBuffer(nil)
-	// If e.value has type T, the encoder expects a *struct{ X T }.
-	// Pass a *T with a zero field and hope it all works out.
-	x := reflect.New(et)
-	x.Elem().Set(reflect.ValueOf(e.value))
-	if err := props.enc(p, props, toStructPointer(x)); err != nil {
-		return err
-	}
-	e.enc = p.buf
-	return nil
 }

 func (this Extension) GoString() string {
@@ -251,7 +209,7 @@ func (this Extension) GoString() string {
 	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
 }

-func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+func SetUnsafeExtension(pb extendableProto, fieldNum int32, value interface{}) error {
 	typ := reflect.TypeOf(pb).Elem()
 	ext, ok := extensionMaps[typ]
 	if !ok {
@@ -261,10 +219,10 @@ func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
 	if !ok {
 		return errors.New("proto: bad extension number; not in declared ranges")
 	}
-	return SetExtension(pb, desc, value)
+	return setExtension(pb, desc, value)
 }

-func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+func GetUnsafeExtension(pb extendableProto, fieldNum int32) (interface{}, error) {
 	typ := reflect.TypeOf(pb).Elem()
 	ext, ok := extensionMaps[typ]
 	if !ok {
@@ -276,19 +234,3 @@ func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
 	}
 	return GetExtension(pb, desc)
 }
-
-func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
-	x := &XXX_InternalExtensions{
-		p: new(struct {
-			mu           sync.Mutex
-			extensionMap map[int32]Extension
-		}),
-	}
-	x.p.extensionMap = m
-	return *x
-}
-
-func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
-	pb := extendable.(extendableProto)
-	return pb.extensionsWrite()
-}
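Note: with the *Internal* variants gone, callers work directly on the map[int32]Extension that a generated message exposes. A sketch under the signatures this vendored revision keeps; SizeOfExtensionMap reports exactly the buffer size EncodeExtensionMap fills:

package extdemo

import "github.com/gogo/protobuf/proto"

// encodeExts flattens an extension map to wire bytes using the
// map-based helpers retained by this sync.
func encodeExts(exts map[int32]proto.Extension) ([]byte, error) {
	data := make([]byte, proto.SizeOfExtensionMap(exts))
	n, err := proto.EncodeExtensionMap(exts, data)
	if err != nil {
		return nil, err
	}
	return data[:n], nil
}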
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
index 2c30d709..2e35ae2d 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -889,10 +889,6 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }

-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const GoGoProtoPackageIsVersion2 = true
-
 // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
 // to assert that that code is compatible with this version of the proto package.
 const GoGoProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
index 4b4f7c90..a6c2c06b 100644
--- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
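Note: the lib.go hunk above drops the version-2 handshake constant. Generated files pin themselves to the runtime with a blank constant, so dropping the constant is how version skew surfaces at build time rather than at runtime; a sketch of the generated idiom:

package generated

import "github.com/gogo/protobuf/proto"

// Compile-time assertion: this file fails to compile against any
// vendored runtime that does not export GoGoProtoPackageIsVersion1.
const _ = proto.GoGoProtoPackageIsVersion1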
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
index fd982dec..e25e01e6 100644
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -149,21 +149,9 @@ func skipVarint(buf []byte) []byte {

 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		if err := encodeExtensions(exts); err != nil {
-			return nil, err
-		}
-		m, _ = exts.extensionsRead()
-	case map[int32]Extension:
-		if err := encodeExtensionsMap(exts); err != nil {
-			return nil, err
-		}
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+	if err := encodeExtensionMap(m); err != nil {
+		return nil, err
 	}

 	// Sort extension IDs to provide a deterministic encoding.
@@ -190,17 +178,7 @@ func MarshalMessageSet(exts interface{}) ([]byte, error) {

 // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m = exts.extensionsWrite()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return errors.New("proto: not an extension map")
-	}
-
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
 	ms := new(messageSet)
 	if err := Unmarshal(buf, ms); err != nil {
 		return err
@@ -231,16 +209,7 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {

 // MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
 // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		m, _ = exts.extensionsRead()
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
 	var b bytes.Buffer
 	b.WriteByte('{')
@@ -283,7 +252,7 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {

 // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
 // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
 	// Common-case fast path.
 	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
 		return nil
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
index fb512e2e..749919d2 100644
--- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-// +build appengine js
+// +build appengine

 // This file contains an implementation of proto field accesses using package reflect.
 // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -139,11 +139,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 	return structPointer_ifield(p, f).(*[]string)
 }

-// Extensions returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
-	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
-}
-
 // ExtMap returns the address of an extension map field in the struct.
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return structPointer_ifield(p, f).(*map[int32]Extension)
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
index 6b5567d4..e9be0fe9 100644
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-// +build !appengine,!js
+// +build !appengine

 // This file contains the implementation of the proto field accesses using package unsafe.

@@ -126,10 +126,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
 }

 // ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
-	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
 func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
 	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
 }
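Note: for readers new to the structPointer accessors touched above, they are all variations on the same base-plus-field-offset arithmetic. A self-contained sketch of the pattern with a toy struct (not the library's types):

package main

import (
	"fmt"
	"unsafe"
)

// toy stands in for a generated message struct; structPointer_ExtMap
// does the same base+offset reinterpretation on real messages.
type toy struct {
	id  int32
	ext map[int32]string
}

func main() {
	t := &toy{ext: map[int32]string{1: "x"}}
	base := unsafe.Pointer(t)
	off := unsafe.Offsetof(t.ext)
	// Reinterpret (base + offset) as a pointer to the field's type.
	mp := (*map[int32]string)(unsafe.Pointer(uintptr(base) + off))
	fmt.Println((*mp)[1]) // x
}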
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
index ad7c8517..6bc85fa9 100644
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -72,13 +70,16 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) {

 func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer {
 	size := typ.Elem().Size()
-
 	oldHeader := structPointer_GetSliceHeader(base, f)
-	oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem()
 	newLen := oldHeader.Len + 1
-	newSlice := reflect.MakeSlice(typ, newLen, newLen)
-	reflect.Copy(newSlice, oldSlice)
-	bas := toStructPointer(newSlice)
+	slice := reflect.MakeSlice(typ, newLen, newLen)
+	bas := toStructPointer(slice)
+	for i := 0; i < oldHeader.Len; i++ {
+		newElemptr := uintptr(bas) + uintptr(i)*size
+		oldElemptr := oldHeader.Data + uintptr(i)*size
+		copyUintPtr(oldElemptr, newElemptr, int(size))
+	}
+
 	oldHeader.Data = uintptr(bas)
 	oldHeader.Len = newLen
 	oldHeader.Cap = newLen
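Note: appendStructPointer above grows a slice by hand — allocate len+1, byte-copy each old element, swap the header. The same grow-by-one operation expressed in safe reflect terms, as a sketch:

package main

import (
	"fmt"
	"reflect"
)

// growByOne mirrors appendStructPointer's shape without unsafe:
// allocate len+1, copy the old elements, return the new slice.
func growByOne(s interface{}) interface{} {
	v := reflect.ValueOf(s)
	n := reflect.MakeSlice(v.Type(), v.Len()+1, v.Len()+1)
	reflect.Copy(n, v)
	return n.Interface()
}

func main() {
	fmt.Println(growByOne([]int{1, 2, 3})) // [1 2 3 0]
}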
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
index 3e4cad03..5e6a0b3b 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -1,7 +1,7 @@
-// Protocol Buffers for Go with Gadgets
+// Extensions for Protocol Buffers to create more go like structures.
 //
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -542,13 +542,17 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
 			p.dec = (*Buffer).dec_slice_int64
 			p.packedDec = (*Buffer).dec_slice_packed_int64
 		case reflect.Uint8:
+			p.enc = (*Buffer).enc_slice_byte
 			p.dec = (*Buffer).dec_slice_byte
-			if p.proto3 {
+			p.size = size_slice_byte
+			// This is a []byte, which is either a bytes field,
+			// or the value of a map field. In the latter case,
+			// we always encode an empty []byte, so we should not
+			// use the proto3 enc/size funcs.
+			// f == nil iff this is the key/value of a map field.
+			if p.proto3 && f != nil {
 				p.enc = (*Buffer).enc_proto3_slice_byte
 				p.size = size_proto3_slice_byte
-			} else {
-				p.enc = (*Buffer).enc_slice_byte
-				p.size = size_slice_byte
 			}
 		case reflect.Float32, reflect.Float64:
 			switch t2.Bits() {
@@ -740,9 +744,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	propertiesMap[t] = prop

 	// build properties
-	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
-		reflect.PtrTo(t).Implements(extendableProtoV1Type) ||
-		reflect.PtrTo(t).Implements(extendableBytesType)
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
 	prop.unrecField = invalidField
 	prop.Prop = make([]*Properties, t.NumField())
 	prop.order = make([]int, t.NumField())
@@ -754,11 +756,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 		name := f.Name
 		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)

-		if f.Name == "XXX_InternalExtensions" { // special case
-			p.enc = (*Buffer).enc_exts
-			p.dec = nil // not needed
-			p.size = size_exts
-		} else if f.Name == "XXX_extensions" { // special case
+		if f.Name == "XXX_extensions" { // special case
 			if len(f.Tag.Get("protobuf")) > 0 {
 				p.enc = (*Buffer).enc_ext_slice_byte
 				p.dec = nil // not needed
@@ -768,14 +766,13 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 				p.dec = nil // not needed
 				p.size = size_map
 			}
-		} else if f.Name == "XXX_unrecognized" { // special case
+		}
+		if f.Name == "XXX_unrecognized" { // special case
 			prop.unrecField = toField(&f)
 		}
-		oneof := f.Tag.Get("protobuf_oneof") // special case
-		if oneof != "" {
+		oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+		if oneof {
 			isOneofMessage = true
-			// Oneof fields don't use the traditional protobuf tag.
-			p.OrigName = oneof
 		}
 		prop.Prop[i] = p
 		prop.order[i] = i
@@ -786,7 +783,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 			}
 			print("\n")
 		}
-		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
 			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
 		}
 	}
@@ -924,17 +921,3 @@ func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }

 // MessageType returns the message type (pointer to struct) for a named message.
 func MessageType(name string) reflect.Type { return protoTypes[name] }
-
-// A registry of all linked proto files.
-var (
-	protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
-	protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
index 4607a975..8daf9f77 100644
--- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
index 5a5fd93f..4fe7e081 100644
--- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
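Note: the properties.go hunk above hinges on one rule — the proto3 []byte fast path (which elides empty byte slices) applies only to real struct fields, never to map keys/values, which must encode even when empty. Distilled into a sketch (names are mine, not the library's):

package propsdemo

// useProto3BytesEncoder captures the selection rule restored above:
// f == nil marks the key/value of a map field, which always keeps
// the non-proto3 enc/size funcs.
func useProto3BytesEncoder(proto3 bool, isStructField bool) bool {
	return proto3 && isStructField
}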
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
index b3e12e26..b60be28a 100644
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -1,7 +1,7 @@
-// Protocol Buffers for Go with Gadgets
+// Extensions for Protocol Buffers to create more go like structures.
 //
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -50,7 +50,6 @@ import (
 	"reflect"
 	"sort"
 	"strings"
-	"sync"
 )

 var (
@@ -160,7 +159,7 @@ func (w *textWriter) indent() { w.ind++ }

 func (w *textWriter) unindent() {
 	if w.ind == 0 {
-		log.Print("proto: textWriter unindented too far")
+		log.Printf("proto: textWriter unindented too far")
 		return
 	}
 	w.ind--
@@ -388,7 +387,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
 		pv = reflect.New(sv.Type())
 		pv.Elem().Set(sv)
 	}
-	if pv.Type().Implements(extensionRangeType) {
+	if pv.Type().Implements(extendableProtoType) {
 		if err := writeExtensions(w, pv); err != nil {
 			return err
 		}
@@ -636,37 +635,28 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 // pv is assumed to be a pointer to a protocol message struct that is extendable.
 func writeExtensions(w *textWriter, pv reflect.Value) error {
 	emap := extensionMaps[pv.Type().Elem()]
-	e := pv.Interface().(Message)
+	ep := pv.Interface().(extendableProto)

+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
 	var m map[int32]Extension
-	var mu sync.Locker
-	if em, ok := e.(extensionsBytes); ok {
+	if em, ok := ep.(extensionsMap); ok {
+		m = em.ExtensionMap()
+	} else if em, ok := ep.(extensionsBytes); ok {
 		eb := em.GetExtensions()
 		var err error
 		m, err = BytesToExtensionsMap(*eb)
 		if err != nil {
 			return err
 		}
-		mu = notLocker{}
-	} else if _, ok := e.(extendableProto); ok {
-		ep, _ := extendable(e)
-		m, mu = ep.extensionsRead()
-		if m == nil {
-			return nil
-		}
 	}
-
-	// Order the extensions by ID.
-	// This isn't strictly necessary, but it will give us
-	// canonical output, which will also make testing easier.
-
-	mu.Lock()
 	ids := make([]int32, 0, len(m))
 	for id := range m {
 		ids = append(ids, id)
 	}
 	sort.Sort(int32Slice(ids))
-	mu.Unlock()

 	for _, extNum := range ids {
 		ext := m[extNum]
@@ -682,7 +672,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
 			continue
 		}

-		pb, err := GetExtension(e, desc)
+		pb, err := GetExtension(ep, desc)
 		if err != nil {
 			return fmt.Errorf("failed getting extension: %v", err)
 		}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
index 58926741..cdb23373 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -1,7 +1,5 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
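Note: writeExtensions above sorts extension IDs before printing so text output is canonical and testable. The same idiom in isolation (the library uses its own int32Slice sorter, which predates sort.Slice; sort.Slice here assumes Go 1.8+):

package main

import (
	"fmt"
	"sort"
)

func main() {
	exts := map[int32]string{300: "c", 100: "a", 200: "b"}
	ids := make([]int32, 0, len(exts))
	for id := range exts {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		fmt.Println(id, exts[id]) // always 100, 200, 300
	}
}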
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
index bcd732c3..61b4bc8c 100644
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -1,7 +1,7 @@
-// Protocol Buffers for Go with Gadgets
+// Extensions for Protocol Buffers to create more go like structures.
 //
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
+// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved.
+// http://github.com/gogo/protobuf/gogoproto
 //
 // Go support for Protocol Buffers - Google's data interchange format
 //
@@ -519,7 +519,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 				}
 				reqFieldErr = err
 			}
-			ep := sv.Addr().Interface().(Message)
+			ep := sv.Addr().Interface().(extendableProto)
 			if !rep {
 				SetExtension(ep, desc, ext.Interface())
 			} else {
@@ -571,9 +571,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {

 			// The map entry should be this sequence of tokens:
 			//	< key : KEY value : VALUE >
-			// However, implementations may omit key or value, and technically
-			// we should support them in any order.  See b/28924776 for a time
-			// this went wrong.
+			// Technically the "key" and "value" could come in any order,
+			// but in practice they won't.

 			tok := p.next()
 			var terminator string
@@ -585,39 +584,32 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			default:
 				return p.errorf("expected '{' or '<', found %q", tok.value)
 			}
-			for {
-				tok := p.next()
-				if tok.err != nil {
-					return tok.err
-				}
-				if tok.value == terminator {
-					break
-				}
-				switch tok.value {
-				case "key":
-					if err := p.consumeToken(":"); err != nil {
-						return err
-					}
-					if err := p.readAny(key, props.mkeyprop); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				case "value":
-					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
-						return err
-					}
-					if err := p.readAny(val, props.mvalprop); err != nil {
-						return err
-					}
-					if err := p.consumeOptionalSeparator(); err != nil {
-						return err
-					}
-				default:
-					p.back()
-					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
-				}
+			if err := p.consumeToken("key"); err != nil {
+				return err
+			}
+			if err := p.consumeToken(":"); err != nil {
+				return err
+			}
+			if err := p.readAny(key, props.mkeyprop); err != nil {
+				return err
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			if err := p.consumeToken("value"); err != nil {
+				return err
+			}
+			if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+				return err
+			}
+			if err := p.readAny(val, props.mvalprop); err != nil {
+				return err
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			if err := p.consumeToken(terminator); err != nil {
+				return err
 			}

 			dst.SetMapIndex(key, val)
@@ -640,8 +632,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 				return err
 			}
 			reqFieldErr = err
-		}
-		if props.Required {
+		} else if props.Required {
 			reqCount--
 		}
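Note: after this revert the text parser consumes map entries in one fixed order — key, then value, then the terminator — instead of the flexible loop removed above, so reversed or missing fields now fail to parse. A sketch of the only accepted entry shape (my_map is a hypothetical map field):

package textdemo

// entry is the map-entry layout the restored parser accepts:
// "key" first, then "value" (separators after each are optional).
const entry = `
my_map: <
  key: "k"
  value: 42
>
`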
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
index 341b59c5..342d65a4 100644
--- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -41,9 +41,7 @@ var _ = math.Inf

 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion1

 type FieldDescriptorProto_Type int32

@@ -957,9 +955,9 @@ type FileOptions struct {
 	// suffixed package.
 	JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *FileOptions) Reset()         { *m = FileOptions{} }
@@ -968,12 +966,18 @@ func (*FileOptions) ProtoMessage()    {}
 func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} }

 var extRange_FileOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FileOptions
 }
+func (m *FileOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_FileOptions_JavaMultipleFiles bool = false
 const Default_FileOptions_JavaGenerateEqualsAndHash bool = false
@@ -1149,9 +1153,9 @@ type MessageOptions struct {
 	// parser.
 	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
@@ -1160,12 +1164,18 @@ func (*MessageOptions) ProtoMessage()    {}
 func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} }

 var extRange_MessageOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MessageOptions
 }
+func (m *MessageOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_MessageOptions_MessageSetWireFormat bool = false
 const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
@@ -1265,9 +1275,9 @@ type FieldOptions struct {
 	// For Google-internal migration only. Do not use.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
@@ -1276,12 +1286,18 @@ func (*FieldOptions) ProtoMessage()    {}
 func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} }

 var extRange_FieldOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_FieldOptions
 }
+func (m *FieldOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
 const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
@@ -1348,9 +1364,9 @@ type EnumOptions struct {
 	// is a formalization for deprecating enums.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
@@ -1359,12 +1375,18 @@ func (*EnumOptions) ProtoMessage()    {}
 func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} }

 var extRange_EnumOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumOptions
 }
+func (m *EnumOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_EnumOptions_Deprecated bool = false

@@ -1396,9 +1418,9 @@ type EnumValueOptions struct {
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
@@ -1407,12 +1429,18 @@ func (*EnumValueOptions) ProtoMessage()    {}
 func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} }

 var extRange_EnumValueOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_EnumValueOptions
 }
+func (m *EnumValueOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_EnumValueOptions_Deprecated bool = false

@@ -1437,9 +1465,9 @@ type ServiceOptions struct {
 	// this is a formalization for deprecating services.
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
@@ -1448,12 +1476,18 @@ func (*ServiceOptions) ProtoMessage()    {}
 func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} }

 var extRange_ServiceOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_ServiceOptions
 }
+func (m *ServiceOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_ServiceOptions_Deprecated bool = false

@@ -1478,9 +1512,9 @@ type MethodOptions struct {
 	// this is a formalization for deprecating methods.
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
-	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	proto.XXX_InternalExtensions `json:"-"`
-	XXX_unrecognized             []byte `json:"-"`
+	UninterpretedOption []*UninterpretedOption    `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_extensions      map[int32]proto.Extension `json:"-"`
+	XXX_unrecognized    []byte                    `json:"-"`
 }

 func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
@@ -1489,12 +1523,18 @@ func (*MethodOptions) ProtoMessage()    {}
 func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} }

 var extRange_MethodOptions = []proto.ExtensionRange{
-	{Start: 1000, End: 536870911},
+	{1000, 536870911},
 }

 func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
 	return extRange_MethodOptions
 }
+func (m *MethodOptions) ExtensionMap() map[int32]proto.Extension {
+	if m.XXX_extensions == nil {
+		m.XXX_extensions = make(map[int32]proto.Extension)
+	}
+	return m.XXX_extensions
+}

 const Default_MethodOptions_Deprecated bool = false

@@ -1835,147 +1875,143 @@ func init() {
 	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
 }

-func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) }
-
 var fileDescriptorDescriptor = []byte{
-	// 2211 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xc6,
-	[... remaining 2211 bytes of the old gzipped descriptor data ...]
+	// 2192 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6,
+	[... remaining 2192 bytes of the regenerated gzipped descriptor data continue ...]
0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go similarity index 92% rename from vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go rename to vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go index b175f555..76e2c95f 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go @@ -1,48 +1,12 @@ -// Code generated by protoc-gen-gogo. -// source: descriptor.proto -// DO NOT EDIT! - -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo -*/ package descriptor import fmt "fmt" + import strings "strings" import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" import sort "sort" import strconv "strconv" import reflect "reflect" -import proto "github.com/gogo/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf func (this *FileDescriptorSet) GoString() string { if this == nil { @@ -389,7 +353,9 @@ func (this *FileOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -417,7 +383,9 @@ func (this *MessageOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -451,7 +419,9 @@ func (this *FieldOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -473,7 +443,9 @@ func (this *EnumOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = 
append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -492,7 +464,9 @@ func (this *EnumValueOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -511,7 +485,9 @@ func (this *ServiceOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -530,7 +506,9 @@ func (this *MethodOptions) GoString() string { if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_extensions != nil { + s = append(s, "XXX_extensions: "+extensionToGoStringDescriptor(this.XXX_extensions)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -638,12 +616,11 @@ func valueToGoStringDescriptor(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { - e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) +func extensionToGoStringDescriptor(e map[int32]github_com_gogo_protobuf_proto.Extension) string { if e == nil { return "nil" } - s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + s := "map[int32]proto.Extension{" keys := make([]int, 0, len(e)) for k := range e { keys = append(keys, int(k)) @@ -653,6 +630,6 @@ func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) str for _, k := range keys { ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) } - s += strings.Join(ss, ",") + "})" + s += strings.Join(ss, ",") + "}" return s } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go index 861f4d02..ab170f91 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -1,6 +1,4 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 38f86c5f..6308548c 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -510,63 +510,41 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - // UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. // This function is lenient and will decode any options permutations of the // related Marshaler. func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) } // Unmarshal unmarshals a JSON object stream into a protocol // buffer. This function is lenient and will decode any options // permutations of the related Marshaler. func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) + dec := json.NewDecoder(r) + return UnmarshalNext(dec, pb) } // UnmarshalString will populate the fields of a protocol buffer based // on a JSON string. This function is lenient and will decode any options // permutations of the related Marshaler. func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) + return Unmarshal(strings.NewReader(str), pb) } // unmarshalValue converts/copies a value into the target. // prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { +func unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { targetType := target.Type() // Allocate memory for pointer fields. if targetType.Kind() == reflect.Ptr { target.Set(reflect.New(targetType.Elem())) - return u.unmarshalValue(target.Elem(), inputValue, prop) + return unmarshalValue(target.Elem(), inputValue, prop) } // Handle well-known types. @@ -581,7 +559,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // as the wrapped primitive type, except that null is allowed." // encoding/json will turn JSON `null` into Go `nil`, // so we don't have to do any extra work. 
- return u.unmarshalValue(target.Field(0), inputValue, prop) + return unmarshalValue(target.Field(0), inputValue, prop) case "Any": return fmt.Errorf("unmarshaling Any not supported yet") case "Duration": @@ -679,7 +657,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe continue } - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + if err := unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { return err } } @@ -692,12 +670,12 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } nv := reflect.New(oop.Type.Elem()) target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + if err := unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { return err } } } - if !u.AllowUnknownFields && len(jsonFields) > 0 { + if len(jsonFields) > 0 { // Pick any field to be the scapegoat. var f string for fname := range jsonFields { @@ -718,7 +696,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe len := len(slc) target.Set(reflect.MakeSlice(targetType, len, len)) for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + if err := unmarshalValue(target.Index(i), slc[i], prop); err != nil { return err } } @@ -747,14 +725,14 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + if err := unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { return err } } // Unmarshal map value. v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { + if err := unmarshalValue(v, raw, valprop); err != nil { return err } target.SetMapIndex(k, v) @@ -812,21 +790,10 @@ func (w *errWriter) write(str string) { // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. type mapKeys []reflect.Value func (s mapKeys) Len() int { return len(s) } func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index 04dcb881..07288a25 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -378,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group wire := int(u & 0x7) if wire == WireEndGroup { if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) 
- return &RequiredNotSetError{"{Unknown}"} - } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 6b9b3637..9f484f53 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -489,37 +489,6 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e return } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { epb, ok := extendable(pb) diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index ac4ddbc0..170b8e87 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -308,7 +308,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // read point + index int // write point // pools of basic types to amortize allocation. 
bools []bool diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf..8214ce32 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { - log.Print("proto: textWriter unindented too far") + log.Printf("proto: textWriter unindented too far") return } w.ind-- diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 7e6f145a..0b8c59f7 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -44,9 +44,6 @@ import ( "unicode/utf8" ) -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - type ParseError struct { Message string Line int // 1-based line number @@ -511,16 +508,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err != nil { return p.errorf("failed to marshal message of type %q: %v", messageName, err) } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } sv.FieldByName("TypeUrl").SetString(extName) sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true continue } diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE new file mode 100644 index 00000000..b2b06503 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go new file mode 100644 index 00000000..d9e87b2f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go @@ -0,0 +1,72 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +// gRPC Prometheus monitoring interceptors for client-side gRPC. + +package grpc_prometheus + +import ( + "io" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	monitor := newClientReporter(Unary, method)
+	monitor.SentMessage()
+	err := invoker(ctx, method, req, reply, cc, opts...)
+	if err == nil {
+		monitor.ReceivedMessage()
+	}
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	monitor := newClientReporter(clientStreamType(desc), method)
+	clientStream, err := streamer(ctx, desc, cc, method, opts...)
+	if err != nil {
+		monitor.Handled(grpc.Code(err))
+		return nil, err
+	}
+	return &monitoredClientStream{clientStream, monitor}, nil
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		s.monitor.Handled(grpc.Code(err))
+	}
+	return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
new file mode 100644
index 00000000..16b76155
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -0,0 +1,111 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+ +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" + + prom "github.com/prometheus/client_golang/prometheus" +) + +var ( + clientStartedCounter = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "started_total", + Help: "Total number of RPCs started on the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientHandledCounter = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}) + + clientStreamMsgReceived = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientStreamMsgSent = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }, []string{"grpc_type", "grpc_service", "grpc_method"}) + + clientHandledHistogramEnabled = false + clientHandledHistogramOpts = prom.HistogramOpts{ + Namespace: "grpc", + Subsystem: "client", + Name: "handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + } + clientHandledHistogram *prom.HistogramVec +) + +func init() { + prom.MustRegister(clientStartedCounter) + prom.MustRegister(clientHandledCounter) + prom.MustRegister(clientStreamMsgReceived) + prom.MustRegister(clientStreamMsgSent) +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. 
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&clientHandledHistogramOpts)
+	}
+	if !clientHandledHistogramEnabled {
+		clientHandledHistogram = prom.NewHistogramVec(
+			clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+		prom.Register(clientHandledHistogram)
+	}
+	clientHandledHistogramEnabled = true
+}
+
+type clientReporter struct {
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{rpcType: rpcType}
+	if clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if clientHandledHistogramEnabled {
+		clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 00000000..f85c8c23
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+// Register takes a gRPC server and pre-initializes all counters to 0.
+// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have
+// been registered with the server.
+func Register(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(serviceName, &mInfo)
+		}
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	monitor := newServerReporter(Unary, info.FullMethod)
+	monitor.ReceivedMessage()
+	resp, err := handler(ctx, req)
+	monitor.Handled(grpc.Code(err))
+	if err == nil {
+		monitor.SentMessage()
+	}
+	return resp, err
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	monitor := newServerReporter(streamRpcType(info), info.FullMethod)
+	err := handler(srv, &monitoredServerStream{ss, monitor})
+	monitor.Handled(grpc.Code(err))
+	return err
+}
+
+func streamRpcType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
new file mode 100644
index 00000000..628a8905
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -0,0 +1,157 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	serverStartedCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "started_total",
+			Help:      "Total number of RPCs started on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledCounter = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "handled_total",
+			Help:      "Total number of RPCs completed on the server, regardless of success or failure.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"})
+
+	serverStreamMsgReceived = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_received_total",
+			Help:      "Total number of RPC stream messages received on the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverStreamMsgSent = prom.NewCounterVec(
+		prom.CounterOpts{
+			Namespace: "grpc",
+			Subsystem: "server",
+			Name:      "msg_sent_total",
+			Help:      "Total number of gRPC stream messages sent by the server.",
+		}, []string{"grpc_type", "grpc_service", "grpc_method"})
+
+	serverHandledHistogramEnabled = false
+	serverHandledHistogramOpts    = prom.HistogramOpts{
+		Namespace: "grpc",
+		Subsystem: "server",
+		Name:      "handling_seconds",
+		Help:      "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+		Buckets:   prom.DefBuckets,
+	}
+	serverHandledHistogram *prom.HistogramVec
+)
+
+func init() {
+	prom.MustRegister(serverStartedCounter)
+	prom.MustRegister(serverHandledCounter)
+	prom.MustRegister(serverStreamMsgReceived)
+	prom.MustRegister(serverStreamMsgSent)
+}
+
+type HistogramOption func(*prom.HistogramOpts)
+
+// 
WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. +func WithHistogramBuckets(buckets []float64) HistogramOption { + return func(o *prom.HistogramOpts) { o.Buckets = buckets } +} + +// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func EnableHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&serverHandledHistogramOpts) + } + if !serverHandledHistogramEnabled { + serverHandledHistogram = prom.NewHistogramVec( + serverHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + prom.Register(serverHandledHistogram) + } + serverHandledHistogramEnabled = true +} + +type serverReporter struct { + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter { + r := &serverReporter{rpcType: rpcType} + if serverHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *serverReporter) ReceivedMessage() { + serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) SentMessage() { + serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) Handled(code codes.Code) { + serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if serverHandledHistogramEnabled { + serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} + +// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. +func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) { + methodName := mInfo.Name + methodType := string(typeFromMethodInfo(mInfo)) + // These are just references (no increments), as just referencing will create the labels but not set values. + serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if serverHandledHistogramEnabled { + serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range allCodes { + serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} + +func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { + if mInfo.IsClientStream == false && mInfo.IsServerStream == false { + return Unary + } + if mInfo.IsClientStream == true && mInfo.IsServerStream == false { + return ClientStream + } + if mInfo.IsClientStream == false && mInfo.IsServerStream == true { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go new file mode 100644 index 00000000..372460ac --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go @@ -0,0 +1,27 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. 
+// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc/codes" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go index 49f13f7f..9a421911 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go @@ -111,8 +111,7 @@ func decodeJSONPb(d *json.Decoder, v interface{}) error { if !ok { return decodeNonProtoField(d, v) } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, p) + return jsonpb.UnmarshalNext(d, p) } func decodeNonProtoField(d *json.Decoder, v interface{}) error { @@ -125,8 +124,7 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error { rv.Set(reflect.New(rv.Type().Elem())) } if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + return jsonpb.UnmarshalNext(d, rv.Interface().(proto.Message)) } rv = rv.Elem() } diff --git a/vendor/google.golang.org/cloud/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE similarity index 99% rename from vendor/google.golang.org/cloud/LICENSE rename to vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE index a4c5efd8..13f15dfc 100644 --- a/vendor/google.golang.org/cloud/LICENSE +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 Google Inc. + Copyright 2013 Matt T. Proud Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 00000000..66d9b545 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + headerBuf := make([]byte, binary.MaxVarintLen32) + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 00000000..c318385c --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. 
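For reference, the framing ReadDelimited and WriteDelimited implement is just a uvarint length prefix followed by the marshaled message bytes. Below is a minimal, self-contained sketch of that framing in plain Go (standard library only; the byte slice stands in for proto.Marshal output, since a real round trip needs a generated message type):

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"io"
	)

	func main() {
		// Stand-in for the bytes proto.Marshal would produce for a message.
		payload := []byte("encoded-protobuf-bytes")

		// Encode: uvarint length prefix, then the body. This mirrors what
		// WriteDelimited does after marshaling (it also sizes the prefix
		// buffer with binary.MaxVarintLen32).
		var buf bytes.Buffer
		head := make([]byte, binary.MaxVarintLen32)
		n := binary.PutUvarint(head, uint64(len(payload)))
		buf.Write(head[:n])
		buf.Write(payload)

		// Decode: read the prefix, then exactly that many bytes, as
		// ReadDelimited does before handing the body to proto.Unmarshal.
		// (ReadDelimited itself reads the prefix byte by byte so it never
		// consumes more of the stream than the one message.)
		length, err := binary.ReadUvarint(&buf)
		if err != nil {
			panic(err)
		}
		body := make([]byte, length)
		if _, err := io.ReadFull(&buf, body); err != nil {
			panic(err)
		}
		fmt.Printf("decoded %d-byte message: %q\n", length, body)
	}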
+package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 00000000..4b76ea9a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + buf := make([]byte, binary.MaxVarintLen32) + encodedLength := binary.PutUvarint(buf, uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE similarity index 89% rename from vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE rename to vendor/github.com/prometheus/client_golang/LICENSE index d9a10c0d..261eeb9e 100644 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -174,3 +174,28 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 00000000..37e4a7d4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,28 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +goautoneg +http://bitbucket.org/ww/goautoneg +Copyright 2011, Open Knowledge Foundation Ltd. +See README.txt for license details. + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 00000000..c0468800 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. +// +// The stock metrics provided by this package (like Gauge, Counter, Summary) are +// also Collectors (which only ever collect one metric, namely itself). An +// implementer of Collector may, however, collect multiple metrics in a +// coordinated fashion and/or create metrics on the fly. Examples for collectors +// already implemented in this library are the metric vectors (i.e. collection +// of multiple instances of the same Metric but with different label values) +// like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. 
However,
+	// two different Collectors must not send duplicate descriptors.) This
+	// method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. If a Collector encounters an error while
+	// executing this method, it must send an invalid descriptor (created
+	// with NewInvalidDesc) to signal the error to the registry.
+	Describe(chan<- *Desc)
+	// Collect is called by Prometheus when collecting metrics. The
+	// implementation sends each collected metric via the provided channel
+	// and returns once the last metric has been sent. The descriptor of
+	// each sent metric is one of those returned by Describe. Returned
+	// metrics that share the same descriptor must differ in their variable
+	// label values. This method may be called concurrently and must
+	// therefore be implemented in a concurrency-safe way. Blocking comes
+	// at the expense of the overall performance of rendering all registered
+	// metrics. Ideally, Collector implementations support concurrent
+	// readers.
+	Collect(chan<- Metric)
+}
+
+// SelfCollector implements Collector for a single Metric so that the
+// Metric collects itself. Add it as an anonymous field to a struct that
+// implements Metric, and call Init with the Metric itself as an argument.
+type SelfCollector struct {
+	self Metric
+}
+
+// Init provides the SelfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *SelfCollector) Init(self Metric) {
+	c.self = self
+}
+
+// Describe implements Collector.
+func (c *SelfCollector) Describe(ch chan<- *Desc) {
+	ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *SelfCollector) Collect(ch chan<- Metric) {
+	ch <- c.self
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 00000000..a2952d1c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,175 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"hash/fnv"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, or
+// errors that occurred.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+	Metric
+	Collector
+
+	// Set is used to set the Counter to an arbitrary value. It is only used
+	// if you have to transfer a value from an external counter into this
+	// Prometheus metric. Do not use it for regular handling of a
+	// Prometheus counter (as it can be used to break the contract of
+	// monotonically increasing values).
+	Set(float64)
+	// Inc increments the counter by 1.
+	Inc()
+	// Add adds the given value to the counter. It panics if the value is <
+	// 0.
+	Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+	result.Init(result) // Init self-collection.
+	return result
+}
+
+type counter struct {
+	value
+}
+
+func (c *counter) Add(v float64) {
+	if v < 0 {
+		panic(errors.New("counter cannot decrease in value"))
+	}
+	c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+	MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &CounterVec{
+		MetricVec: MetricVec{
+			children: map[uint64]Metric{},
+			desc:     desc,
+			hash:     fnv.New64a(),
+			newMetric: func(lvs ...string) Metric {
+				result := &counter{value: value{
+					desc:       desc,
+					valType:    CounterValue,
+					labelPairs: makeLabelPairs(desc, lvs),
+				}}
+				result.Init(result) // Init self-collection.
+				return result
+			},
+		},
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+	return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+	return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), CounterValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000..fcde784d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,201 @@
+package prometheus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/fnv"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+var (
+	metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+	labelNameRE  = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each of constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabelPairs contains precalculated DTO label pairs based on
+	// the constant labels.
+	constLabelPairs []*dto.LabelPair
+	// VariableLabels contains names of labels for which the metric
+	// maintains variable values.
+	variableLabels []string
+	// id is a hash of the values of the ConstLabels and fqName. This
+	// must be unique among all registered descriptors and can therefore be
+	// used as an identifier of the descriptor.
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported at
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels,
+	}
+	if help == "" {
+		d.err = errors.New("empty help string")
+		return d
+	}
+	if !metricNameRE.MatchString(fqName) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, labelName := range variableLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name", labelName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = errors.New("duplicate label names")
+		return d
+	}
+	h := fnv.New64a()
+	var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
+	for _, val := range labelValues {
+		b.Reset()
+		b.WriteString(val)
+		b.WriteByte(separatorByte)
+		h.Write(b.Bytes())
+	}
+	d.id = h.Sum64()
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+ h.Reset() + b.Reset() + b.WriteString(help) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) + for _, labelName := range labelNames { + b.Reset() + b.WriteString(labelName) + b.WriteByte(separatorByte) + h.Write(b.Bytes()) + } + d.dimHash = h.Sum64() + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 00000000..425fe879 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides embeddable metric primitives for servers and +// standardized exposition of telemetry through a web services interface. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// To expose metrics registered with the Prometheus registry, an HTTP server +// needs to know about the Prometheus handler. The usual endpoint is "/metrics". 
+// +// http.Handle("/metrics", prometheus.Handler()) +// +// As a starting point a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }) +// ) +// +// func init() { +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.Inc() +// +// http.Handle("/metrics", prometheus.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter. +// It also exports some stats about the HTTP usage of the /metrics +// endpoint. (See the Handler function for more detail.) +// +// Two more advanced metric types are the Summary and Histogram. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, and +// Histogram, a very important part of the Prometheus data model is the +// partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// and HistogramVec. +// +// Those are all the parts needed for basic usage. Detailed documentation and +// examples are provided below. +// +// Everything else this package offers is essentially for "power users" only. A +// few pointers to "power user features": +// +// All the various ...Opts structs have a ConstLabels field for labels that +// never change their value (which is only useful under special circumstances, +// see documentation of the Opts type). +// +// The Untyped metric behaves like a Gauge, but signals the Prometheus server +// not to assume anything about its type. +// +// Functions to fine-tune how the metric registry works: EnableCollectChecks, +// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// +// For custom metric collection, there are two entry points: Custom Metric +// implementations and custom Collector implementations. A Metric is the +// fundamental unit in the Prometheus data model: a sample at a point in time +// together with its meta-data (like its fully-qualified name and any number of +// pairs of label name and label value) that knows how to marshal itself into a +// data transfer object (aka DTO, implemented as a protocol buffer). A Collector +// gets registered with the Prometheus registry and manages the collection of +// one or more Metrics. Many parts of this package are building blocks for +// Metrics and Collectors. Desc is the metric descriptor, actually used by all +// metrics under the hood, and by Collectors to describe the Metrics to be +// collected, but only to be dealt with by users if they implement their own +// Metrics or Collectors. To create a Desc, the BuildFQName function will come +// in handy. Other useful components for Metric and Collector implementation +// include: LabelPairSorter to sort the DTO version of label pairs, +// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at +// collection time, MetricVec to bundle custom Metrics into a metric vector +// Collector, SelfCollector to make a custom Metric collect itself. 
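+//
+// As an illustrative sketch only (myCollector and its desc field are
+// placeholders, not part of this package), a custom Collector's Collect
+// method could emit such throw-away metrics like this:
+//
+//	func (c myCollector) Collect(ch chan<- prometheus.Metric) {
+//		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
+//	}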
+//
+// A good example for a custom Collector is the ExpvarCollector included in this
+// package, which exports variables published via the "expvar" package as
+// Prometheus metrics.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
new file mode 100644
index 00000000..0f7630d5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+// ExpvarCollector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the ExpvarCollector is inherently
+// slow. Thus, the ExpvarCollector is probably great for experiments and
+// prototyping, but you should seriously consider a more direct implementation of
+// Prometheus metrics for monitoring production systems.
+//
+// Use NewExpvarCollector to create new instances.
+type ExpvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
+// to be registered with the Prometheus registry.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored. (See
+// the usage sketch below.)
+func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
+	return &ExpvarCollector{
+		exports: exports,
+	}
+}
+
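+// A minimal usage sketch for NewExpvarCollector (illustrative only; the
+// metric name and expvar key are placeholders):
+//
+//	desc := NewDesc("open_files", "Number of open files.", nil, nil)
+//	MustRegister(NewExpvarCollector(map[string]*Desc{"open.files": desc}))
+
+// Describe implements Collector.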
+func (e *ExpvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *ExpvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) + } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 00000000..ba8a402c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "hash/fnv" + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. 
 number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+	MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &GaugeVec{
+		MetricVec: MetricVec{
+			children: map[uint64]Metric{},
+			desc:     desc,
+			hash:     fnv.New64a(),
+			newMetric: func(lvs ...string) Metric {
+				return newValue(desc, GaugeValue, 0, lvs...)
+			},
+		},
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+	return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
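+//
+// A brief sketch (the metric name is illustrative; assumes the runtime
+// package is imported):
+//
+//	NewGaugeFunc(
+//		GaugeOpts{Name: "goroutines_count", Help: "Number of goroutines."},
+//		func() float64 { return float64(runtime.NumGoroutine()) },
+//	)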
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 00000000..8be24769 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,263 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutines Gauge + gcDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. +func NewGoCollector() *goCollector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained by system. Sum of all system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", + nil, nil, + ), + 
eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+				valType: CounterValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("heap_objects"),
+					"Number of allocated objects.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("stack_inuse_bytes"),
+					"Number of bytes in use by the stack allocator.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("stack_sys_bytes"),
+					"Number of bytes obtained from system for stack allocator.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mspan_inuse_bytes"),
+					"Number of bytes in use by mspan structures.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mspan_sys_bytes"),
+					"Number of bytes used for mspan structures obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mcache_inuse_bytes"),
+					"Number of bytes in use by mcache structures.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("mcache_sys_bytes"),
+					"Number of bytes used for mcache structures obtained from system.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("buck_hash_sys_bytes"),
+					"Number of bytes used by the profiling bucket hash table.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("gc_sys_bytes"),
+					"Number of bytes used for garbage collection system metadata.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("other_sys_bytes"),
+					"Number of bytes used for other system allocations.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("next_gc_bytes"),
+					"Number of heap bytes when next garbage collection will take place.",
+					nil, nil,
+				),
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+				valType: GaugeValue,
+			}, {
+				desc: NewDesc(
+					memstatNamespace("last_gc_time_seconds"),
+					"Number of seconds since 1970 of last garbage collection.",
+					nil, nil,
+				),
+				// ms.LastGC is in nanoseconds since the epoch, so divide by
+				// 1e9 to convert to seconds. (The previous expression,
+				// float64(ms.LastGC*10 ^ 9), was a bug: ^ is XOR in Go, not
+				// exponentiation.)
+				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+				valType: GaugeValue,
+			},
+		},
+	}
+}
+
+func memstatNamespace(s string) string {
+	return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	ch <- c.goroutines.Desc()
+	ch <- c.gcDesc
+
+	for _, i := range c.metrics {
+		ch <- i.desc
+	}
+}
+
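+// To make these metrics visible, register the collector, e.g. (a typical
+// sketch):
+//
+//	prometheus.MustRegister(prometheus.NewGoCollector())
+
+// Collect returns the current state of all metrics of the collector.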
+func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 00000000..f98a41bc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,450 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "hash/fnv" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +var ( + // DefBuckets are the default Histogram buckets. The default buckets are + // tailored to broadly measure the response time (in seconds) of a + // network service. Most likely, however, you will be required to define + // buckets customized to your use case. 
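+	//
+	// For instance, a service with much slower responses might use custom
+	// buckets along these lines (values in seconds, illustrative only):
+	//
+	//	Buckets: []float64{0.1, 0.5, 1, 5, 30, 120}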
+	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+	errBucketLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in histograms", bucketLabel,
+	)
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Histogram. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this
+	// Histogram. Histograms with the same fully-qualified name must have the
+	// same label names in their ConstLabels.
+	//
+	// Note that in most cases, labels have a value that varies during the
+	// lifetime of a process. Those labels are usually managed with a
+	// HistogramVec. ConstLabels serve only special purposes. One is for the
+	// special case where the value of a label does not change during the
+	// lifetime of a process, e.g. if the revision of the running binary is
+	// put into a label. Another, more advanced purpose is if more than one
+	// Collector needs to collect Histograms with the same fully-qualified
+	// name. In that case, those Histograms must differ in the values of
+	// their ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
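+	//
+	// A sketch of the first special case above (the label name and value
+	// are illustrative only):
+	//
+	//	ConstLabels: Labels{"revision": "v1.2.3"}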
+ ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.Init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + SelfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. 
+	//
+	// Microbenchmarks (BenchmarkHistogramNoLabels):
+	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	i := sort.SearchFloat64s(h.upperBounds, v)
+	if i < len(h.counts) {
+		atomic.AddUint64(&h.counts[i], 1)
+	}
+	atomic.AddUint64(&h.count, 1)
+	for {
+		oldBits := atomic.LoadUint64(&h.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+	var count uint64
+	for i, upperBound := range h.upperBounds {
+		count += atomic.LoadUint64(&h.counts[i])
+		buckets[i] = &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		}
+	}
+	his.Bucket = buckets
+	out.Histogram = his
+	out.Label = h.labelPairs
+	return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+	MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &HistogramVec{
+		MetricVec: MetricVec{
+			children: map[uint64]Metric{},
+			desc:     desc,
+			hash:     fnv.New64a(),
+			newMetric: func(lvs ...string) Metric {
+				return newHistogram(desc, opts, lvs...)
+			},
+		},
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Histogram), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Histogram), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+	return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{}
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if len(desc.variableLabels) != len(labelValues) {
+		return nil, errInconsistentCardinality
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..eabe6024
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,361 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+	Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+	return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+	return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+	return nowFunc(func() time.Time {
+		defer func() {
+			t = t[1:]
+		}()
+
+		return t[0]
+	})
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as its
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+	return InstrumentHandlerFuncWithOpts(
+		SummaryOpts{
+			Subsystem:   "http",
+			ConstLabels: Labels{"handler": handlerName},
+		},
+		handlerFunc,
+	)
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
+// flexibility (at the cost of a more complex call syntax). Like
+// InstrumentHandler, this function registers four metric collectors, but it
+// uses the provided SummaryOpts to create them. However, the fields "Name" and
+// "Help" in the SummaryOpts are ignored. "Name" is replaced by
+// "requests_total", "request_duration_microseconds", "request_size_bytes", and
+// "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+//	prometheus.InstrumentHandlerWithOpts(
+//		prometheus.SummaryOpts{
+//			Subsystem:   "http",
+//			ConstLabels: prometheus.Labels{"handler": handlerName},
+//		},
+//		handler,
+//	)
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
+// more flexibility (at the cost of a more complex call syntax). See
+// InstrumentHandlerWithOpts for details on how the provided SummaryOpts are used.
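+//
+// As an illustration only (editor's sketch; the "/api" path and the inline
+// handler body are hypothetical, not part of this package):
+//
+//	http.Handle("/api", prometheus.InstrumentHandlerFuncWithOpts(
+//		prometheus.SummaryOpts{
+//			Subsystem:   "http",
+//			ConstLabels: prometheus.Labels{"handler": "api"},
+//		},
+//		func(w http.ResponseWriter, r *http.Request) {
+//			w.Write([]byte("ok"))
+//		},
+//	))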
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
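+	// A ContentLength of -1 means the length is unknown (see net/http), so
+	// the body is only counted when a length was actually provided.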
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 00000000..86fd81c1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementers of Metric in this package include Gauge, Counter,
+// Untyped, and Summary. Users can implement their own Metric types, but that
+// should be rarely needed. See the example for SelfCollector, which is also an
+// example for a user-implemented Metric.
+type Metric interface {
+	// Desc returns the descriptor for the Metric. This method idempotently
+	// returns the same descriptor throughout the lifetime of the
+	// Metric. The returned descriptor is immutable by contract. A Metric
+	// unable to describe itself must return an invalid descriptor (created
+	// with NewInvalidDesc).
+	Desc() *Desc
+	// Write encodes the Metric into a "Metric" Protocol Buffer data
+	// transmission object.
+	//
+	// Implementers of custom Metric types must observe concurrency safety
+	// as reads of this metric may occur at any time, and any blocking
+	// occurs at the expense of total performance of rendering all
+	// registered metrics. Ideally Metric implementations should support
+	// concurrent readers.
+	//
+	// The Prometheus client library attempts to minimize memory allocations
+	// and will provide a pre-existing reset dto.Metric pointer. Prometheus
+	// may recycle the dto.Metric proto message, so Metric implementations
+	// should just populate the provided dto.Metric and then should not keep
+	// any reference to it.
+	//
+	// While populating dto.Metric, labels must be sorted lexicographically.
+	// (Implementers may find LabelPairSorter useful for that.)
+	Write(*dto.Metric) error
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric. Mandatory!
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Note that in most cases, labels have a value that varies during the
+	// lifetime of a process. Those labels are usually managed with a metric
+	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+	// serve only special purposes. One is for the special case where the
+	// value of a label does not change during the lifetime of a process,
+	// e.g. if the revision of the running binary is put into a
+	// label. Another, more advanced purpose is if more than one Collector
+	// needs to collect Metrics with the same fully-qualified name. In that
+	// case, those Metrics must differ in the values of their
+	// ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+	return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+	return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+	return s[i] < s[j]
+}
+
+type invalidMetric struct {
+	desc *Desc
+	err  error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+	return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 00000000..d8cf0eda
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+	pid             int
+	collectFn       func(chan<- Metric)
+	pidFn           func() (int, error)
+	cpuTotal        Counter
+	openFDs, maxFDs Gauge
+	vsize, rss      Gauge
+	startTime       Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics, including CPU, memory, and file descriptor usage as well as
+// the process start time, for the given process ID under the given namespace.
+func NewProcessCollector(pid int, namespace string) *processCollector {
+	return NewProcessCollectorPIDFn(
+		func() (int, error) { return pid, nil },
+		namespace,
+	)
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics, including CPU, memory, and file descriptor usage as well
+// as the process start time, under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+	pidFn func() (int, error),
+	namespace string,
+) *processCollector {
+	c := processCollector{
+		pidFn:     pidFn,
+		collectFn: func(chan<- Metric) {},
+
+		cpuTotal: NewCounter(CounterOpts{
+			Namespace: namespace,
+			Name:      "process_cpu_seconds_total",
+			Help:      "Total user and system CPU time spent in seconds.",
+		}),
+		openFDs: NewGauge(GaugeOpts{
+			Namespace: namespace,
+			Name:      "process_open_fds",
+			Help:      "Number of open file descriptors.",
+		}),
+		maxFDs: NewGauge(GaugeOpts{
+			Namespace: namespace,
+			Name:      "process_max_fds",
+			Help:      "Maximum number of open file descriptors.",
+		}),
+		vsize: NewGauge(GaugeOpts{
+			Namespace: namespace,
+			Name:      "process_virtual_memory_bytes",
+			Help:      "Virtual memory size in bytes.",
+		}),
+		rss: NewGauge(GaugeOpts{
+			Namespace: namespace,
+			Name:      "process_resident_memory_bytes",
+			Help:      "Resident memory size in bytes.",
+		}),
+		startTime: NewGauge(GaugeOpts{
+			Namespace: namespace,
+			Name:      "process_start_time_seconds",
+			Help:      "Start time of the process since unix epoch in seconds.",
+		}),
+	}
+
+	// Set up process metric collection if supported by the runtime.
+	if _, err := procfs.NewStat(); err == nil {
+		c.collectFn = c.processCollect
+	}
+
+	return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal.Desc()
+	ch <- c.openFDs.Desc()
+	ch <- c.maxFDs.Desc()
+	ch <- c.vsize.Desc()
+	ch <- c.rss.Desc()
+	ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+	c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
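+//
+// processCollect reads procfs data for the pid returned by pidFn and sends
+// the populated metrics on ch. In line with the TODO above, errors from
+// procfs are currently silently ignored.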
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		return
+	}
+
+	if stat, err := p.NewStat(); err == nil {
+		c.cpuTotal.Set(stat.CPUTime())
+		ch <- c.cpuTotal
+		c.vsize.Set(float64(stat.VirtualMemory()))
+		ch <- c.vsize
+		c.rss.Set(float64(stat.ResidentMemory()))
+		ch <- c.rss
+
+		if startTime, err := stat.StartTime(); err == nil {
+			c.startTime.Set(startTime)
+			ch <- c.startTime
+		}
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		c.openFDs.Set(float64(fds))
+		ch <- c.openFDs
+	}
+
+	if limits, err := p.NewLimits(); err == nil {
+		c.maxFDs.Set(float64(limits.OpenFiles))
+		ch <- c.maxFDs
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go
new file mode 100644
index 00000000..1c33848a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/push.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus
+
+// Push triggers a metric collection by the default registry and pushes all
+// collected metrics to the Pushgateway specified by url. See the Pushgateway
+// documentation for detailed implications of the job and instance
+// parameters. instance can be left empty. You can use just host:port or
+// ip:port as url, in which case 'http://' is added automatically. You can
+// also include the scheme in the URL. However, do not include the
+// '/metrics/jobs/...' part.
+//
+// Note that all previously pushed metrics with the same job and instance will
+// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
+// to push to the Pushgateway.)
+func Push(job, instance, url string) error {
+	return defRegistry.Push(job, instance, url, "PUT")
+}
+
+// PushAdd works like Push, but only previously pushed metrics with the same
+// name (and the same job and instance) will be replaced. (It uses HTTP method
+// 'POST' to push to the Pushgateway.)
+func PushAdd(job, instance, url string) error {
+	return defRegistry.Push(job, instance, url, "POST")
+}
+
+// PushCollectors works like Push, but it does not collect from the default
+// registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
+func PushCollectors(job, instance, url string, collectors ...Collector) error {
+	return pushCollectors(job, instance, url, "PUT", collectors...)
+}
+
+// PushAddCollectors works like PushAdd, but it does not collect from the
+// default registry. Instead, it collects from the provided collectors. It is a
+// convenient way to push only a few metrics.
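+//
+// A minimal usage sketch (editor's note; the job name, Pushgateway address,
+// and counter are hypothetical):
+//
+//	completions := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "batch_job_completions_total",
+//		Help: "Total completed runs of the batch job.",
+//	})
+//	completions.Inc()
+//	if err := prometheus.PushAddCollectors(
+//		"batch_job", "", "pushgateway:9091", completions,
+//	); err != nil {
+//		log.Println("push failed:", err)
+//	}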
+func PushAddCollectors(job, instance, url string, collectors ...Collector) error { + return pushCollectors(job, instance, url, "POST", collectors...) +} + +func pushCollectors(job, instance, url, method string, collectors ...Collector) error { + r := newRegistry() + for _, collector := range collectors { + if _, err := r.Register(collector); err != nil { + return err + } + } + return r.Push(job, instance, url, method) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 00000000..5970aaee --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,726 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package prometheus + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "hash/fnv" + "io" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +var ( + defRegistry = newDefaultRegistry() + errAlreadyReg = errors.New("duplicate metrics collector registration attempted") +) + +// Constants relevant to the HTTP interface. +const ( + // APIVersion is the version of the format of the exported data. This + // will match this library's version, which subscribes to the Semantic + // Versioning scheme. + APIVersion = "0.0.4" + + // DelimitedTelemetryContentType is the content type set on telemetry + // data responses in delimited protobuf format. + DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` + // TextTelemetryContentType is the content type set on telemetry data + // responses in text format. + TextTelemetryContentType = `text/plain; version=` + APIVersion + // ProtoTextTelemetryContentType is the content type set on telemetry + // data responses in protobuf text format. (Only used for debugging.) + ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` + // ProtoCompactTextTelemetryContentType is the content type set on + // telemetry data responses in protobuf compact text format. (Only used + // for debugging.) + ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` + + // Constants for object pools. + numBufs = 4 + numMetricFamilies = 1000 + numMetrics = 10000 + + // Capacity for the channel to collect metrics and descriptors. 
+	capMetricChan = 1000
+	capDescChan   = 10
+
+	contentTypeHeader     = "Content-Type"
+	contentLengthHeader   = "Content-Length"
+	contentEncodingHeader = "Content-Encoding"
+
+	acceptEncodingHeader = "Accept-Encoding"
+	acceptHeader         = "Accept"
+)
+
+// Handler returns the HTTP handler for the global Prometheus registry. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name). Usually the handler is used to handle the "/metrics" endpoint.
+func Handler() http.Handler {
+	return InstrumentHandler("prometheus", defRegistry)
+}
+
+// UninstrumentedHandler works in the same way as Handler, but the returned HTTP
+// handler is not instrumented. This is useful if no instrumentation is desired
+// (for whatever reason) or if the instrumentation has to happen with a
+// different handler name (or with a different instrumentation approach
+// altogether). See the InstrumentHandler example.
+func UninstrumentedHandler() http.Handler {
+	return defRegistry
+}
+
+// Register registers a new Collector to be included in metrics collection. It
+// returns an error if the descriptors provided by the Collector are invalid or
+// if they - in combination with descriptors of already registered Collectors -
+// do not fulfill the consistency and uniqueness criteria described in the Desc
+// documentation.
+//
+// Do not register the same Collector multiple times concurrently. (Registering
+// the same Collector twice would result in an error anyway, but on top of that,
+// it is not safe to do so concurrently.)
+func Register(m Collector) error {
+	_, err := defRegistry.Register(m)
+	return err
+}
+
+// MustRegister works like Register but panics where Register would have
+// returned an error.
+func MustRegister(m Collector) {
+	err := Register(m)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// RegisterOrGet works like Register but does not return an error if a Collector
+// is registered that equals a previously registered Collector. (Two Collectors
+// are considered equal if their Describe method yields the same set of
+// descriptors.) Instead, the previously registered Collector is returned (which
+// is helpful if the new and previously registered Collectors are equal but not
+// identical, i.e. not pointers to the same object).
+//
+// As for Register, it is still not safe to call RegisterOrGet with the same
+// Collector multiple times concurrently.
+func RegisterOrGet(m Collector) (Collector, error) {
+	return defRegistry.RegisterOrGet(m)
+}
+
+// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet
+// would have returned an error.
+func MustRegisterOrGet(m Collector) Collector {
+	existing, err := RegisterOrGet(m)
+	if err != nil {
+		panic(err)
+	}
+	return existing
+}
+
+// Unregister unregisters the Collector that equals the Collector passed in as
+// an argument. (Two Collectors are considered equal if their Describe method
+// yields the same set of descriptors.) The function returns whether a Collector
+// was unregistered.
+func Unregister(c Collector) bool {
+	return defRegistry.Unregister(c)
+}
+
+// SetMetricFamilyInjectionHook sets a function that is called whenever metrics
+// are collected. The hook function must be set before metrics collection begins
+// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler). The
+// MetricFamily protobufs returned by the hook function are merged with the
+// metrics collected in the usual way.
+//
+// This is a way to directly inject MetricFamily protobufs managed and owned by
+// the caller. The caller has full responsibility. As no registration of the
+// injected metrics has happened, there is no descriptor to check against, and
+// there are no registration-time checks. If collect-time checks are disabled
+// (see function EnableCollectChecks), no sanity checks are performed on the
+// returned protobufs at all. If collect-time checks are enabled, type and
+// uniqueness checks are performed, but no further consistency checks (which
+// would require knowledge of a metric descriptor).
+//
+// Sorting concerns: The caller is responsible for sorting the label pairs in
+// each metric. However, the order of metrics will be sorted by the registry as
+// it is required anyway after merging with the metric families collected
+// conventionally.
+//
+// The function must be callable at any time and concurrently.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+	defRegistry.metricFamilyInjectionHook = hook
+}
+
+// PanicOnCollectError sets the behavior whether a panic is caused upon an error
+// while metrics are collected and served to the HTTP endpoint. By default, an
+// internal server error (status code 500) is served with an error message.
+func PanicOnCollectError(b bool) {
+	defRegistry.panicOnCollectError = b
+}
+
+// EnableCollectChecks enables (or disables) additional consistency checks
+// during metrics collection. These additional checks are not enabled by default
+// because they inflict a performance penalty and the errors they check for can
+// only happen if the used Metric and Collector types have internal programming
+// errors. It can be helpful to enable these checks while working with custom
+// Collectors or Metrics whose correctness is not well established yet.
+func EnableCollectChecks(b bool) {
+	defRegistry.collectChecksEnabled = b
+}
+
+// encoder is a function that writes a dto.MetricFamily to an io.Writer in a
+// certain encoding. It returns the number of bytes written and any error
+// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText
+// are encoders.
+type encoder func(io.Writer, *dto.MetricFamily) (int, error)
+
+type registry struct {
+	mtx                       sync.RWMutex
+	collectorsByID            map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs                   map[uint64]struct{}
+	dimHashesByName           map[string]uint64
+	bufPool                   chan *bytes.Buffer
+	metricFamilyPool          chan *dto.MetricFamily
+	metricPool                chan *dto.Metric
+	metricFamilyInjectionHook func() []*dto.MetricFamily
+
+	panicOnCollectError, collectChecksEnabled bool
+}
+
+func (r *registry) Register(c Collector) (Collector, error) {
+	descChan := make(chan *Desc, capDescChan)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+
+	newDescIDs := map[uint64]struct{}{}
+	newDimHashesByName := map[string]uint64{}
+	var collectorID uint64 // Just a sum of all desc IDs.
+	var duplicateDescErr error
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
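+		// Editor's note: a duplicate ID is only recorded here; it turns
+		// into an error below unless the collector as a whole is an
+		// exact re-registration of an existing collector.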
+ if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return nil, errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return existing, errAlreadyReg + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return nil, duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return c, nil +} + +func (r *registry) RegisterOrGet(m Collector) (Collector, error) { + existing, err := r.Register(m) + if err != nil && err != errAlreadyReg { + return nil, err + } + return existing, nil +} + +func (r *registry) Unregister(c Collector) bool { + descChan := make(chan *Desc, capDescChan) + go func() { + c.Describe(descChan) + close(descChan) + }() + + descIDs := map[uint64]struct{}{} + var collectorID uint64 // Just a sum of the desc IDs. + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. 
+ return true +} + +func (r *registry) Push(job, instance, pushURL, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) + if instance != "" { + pushURL += "/instances/" + url.QueryEscape(instance) + } + buf := r.getBuf() + defer r.giveBuf(buf) + if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { + if r.panicOnCollectError { + panic(err) + } + return err + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) + } + return nil +} + +func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { + contentType := expfmt.Negotiate(req.Header) + buf := r.getBuf() + defer r.giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { + if r.panicOnCollectError { + panic(err) + } + http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) +} + +func (r *registry) writePB(encoder expfmt.Encoder) error { + var metricHashes map[uint64]struct{} + if r.collectChecksEnabled { + metricHashes = make(map[uint64]struct{}) + } + metricChan := make(chan Metric, capMetricChan) + wg := sync.WaitGroup{} + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + metricFamily, ok := metricFamiliesByName[desc.fqName] + if !ok { + metricFamily = r.getMetricFamily() + defer r.giveMetricFamily(metricFamily) + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + metricFamiliesByName[desc.fqName] = metricFamily + } + dtoMetric := r.getMetric() + defer r.giveMetric(dtoMetric) + if err := metric.Write(dtoMetric); err != nil { + // TODO: Consider different means of error reporting so + // that a single erroneous metric could be skipped + // instead of blowing up the whole collection. + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + switch { + case metricFamily.Type != nil: + // Type already set. We are good. 
+ case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if r.collectChecksEnabled { + if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + + if r.metricFamilyInjectionHook != nil { + for _, mf := range r.metricFamilyInjectionHook() { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if !exists { + metricFamiliesByName[mf.GetName()] = mf + if r.collectChecksEnabled { + for _, m := range mf.Metric { + if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { + return err + } + } + } + continue + } + for _, m := range mf.Metric { + if r.collectChecksEnabled { + if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { + return err + } + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + + // Now that MetricFamilies are all set, sort their Metrics + // lexicographically by their label values. + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + + // Write out MetricFamilies sorted by their name. + names := make([]string, 0, len(metricFamiliesByName)) + for name := range metricFamiliesByName { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + if err := encoder.Encode(metricFamiliesByName[name]); err != nil { + return err + } + } + return nil +} + +func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { + + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? + h := fnv.New64a() + var buf bytes.Buffer + buf.WriteString(metricFamily.GetName()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. Label pairs must be sorted by contract. But the point of this + // method is to check for contract violations. So we better do the sort + // now. 
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + buf.Reset() + buf.WriteString(lp.GetValue()) + buf.WriteByte(separatorByte) + h.Write(buf.Bytes()) + } + metricHash := h.Sum64() + if _, exists := metricHashes[metricHash]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + metricHashes[metricHash] = struct{}{} + + if desc == nil { + return nil // Nothing left to check if we have no desc. + } + + // Desc consistency with metric family. + if metricFamily.GetName() != desc.fqName { + return fmt.Errorf( + "collected metric %s %s has name %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, + ) + } + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + + r.mtx.RLock() // Remaining checks need the read lock. + defer r.mtx.RUnlock() + + // Is the desc registered? 
+ if _, exist := r.descIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + + return nil +} + +func (r *registry) getBuf() *bytes.Buffer { + select { + case buf := <-r.bufPool: + return buf + default: + return &bytes.Buffer{} + } +} + +func (r *registry) giveBuf(buf *bytes.Buffer) { + buf.Reset() + select { + case r.bufPool <- buf: + default: + } +} + +func (r *registry) getMetricFamily() *dto.MetricFamily { + select { + case mf := <-r.metricFamilyPool: + return mf + default: + return &dto.MetricFamily{} + } +} + +func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { + mf.Reset() + select { + case r.metricFamilyPool <- mf: + default: + } +} + +func (r *registry) getMetric() *dto.Metric { + select { + case m := <-r.metricPool: + return m + default: + return &dto.Metric{} + } +} + +func (r *registry) giveMetric(m *dto.Metric) { + m.Reset() + select { + case r.metricPool <- m: + default: + } +} + +func newRegistry() *registry { + return ®istry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + bufPool: make(chan *bytes.Buffer, numBufs), + metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), + metricPool: make(chan *dto.Metric, numMetrics), + } +} + +func newDefaultRegistry() *registry { + r := newRegistry() + r.Register(NewProcessCollector(os.Getpid(), "")) + r.Register(NewGoCollector()) + return r +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + return true +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 00000000..fe81e004 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,540 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "hash/fnv" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +var ( + // DefObjectives are the default Summary quantile values. + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. 
ConstLabels serve only special purposes. One is for the
+	// special case where the value of a label does not change during the
+	// lifetime of a process, e.g. if the revision of the running binary is
+	// put into a label. Another, more advanced purpose is if more than one
+	// Collector needs to collect Summaries with the same fully-qualified
+	// name. In that case, those Summaries must differ in the values of
+	// their ConstLabels. See the Collector examples.
+	//
+	// If the value of a label never changes (not even between binaries),
+	// that label most likely should not be a label at all (but part of the
+	// metric name).
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported
+	// for q will be the φ-quantile value for some φ between q-e and q+e.
+	// The default value is DefObjectives.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Must be positive. The default value is DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size. The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// TODO: The sliding-window decay algorithm is in an unfortunate state: the
+// Merge method of perks/quantile is actually not working as advertised - and
+// it might be unfixable, as the underlying algorithm is apparently not capable
+// of merging summaries in the first place. To avoid using Merge, we are currently
+// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
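+//
+// A minimal usage sketch (editor's note; the metric name and observed value
+// are hypothetical):
+//
+//	latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//		Name: "request_duration_seconds",
+//		Help: "Request latency distribution.",
+//	})
+//	prometheus.MustRegister(latency)
+//	latency.Observe(0.042)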
+func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if len(opts.Objectives) == 0 { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.Init(s) // Init self-collection. + return s +} + +type summary struct { + SelfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. 
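+// It acquires mtx itself: it swaps the hot and cold buffers under mtx, then
+// flushes the cold buffer into the quantile streams from a background
+// goroutine, releasing mtx only once that flush has completed.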
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		MetricVec: MetricVec{
+			children: map[uint64]Metric{},
+			desc:     desc,
+			hash:     fnv.New64a(),
+			newMetric: func(lvs ...string) Metric {
+				return newSummary(desc, opts, lvs...)
+			},
+		},
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Summary), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Summary), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error.
+// By not returning an error, WithLabelValues allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+	return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+	return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if len(desc.variableLabels) != len(labelValues) {
+		return nil, errInconsistentCardinality
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000..c65ab1c5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "hash/fnv"
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+	Metric
+	Collector
+
+	// Set sets the Untyped metric to an arbitrary value.
+	Set(float64)
+	// Inc increments the Untyped metric by 1.
+	Inc()
+	// Dec decrements the Untyped metric by 1.
+	Dec()
+	// Add adds the given value to the Untyped metric. (The value can be
+	// negative, resulting in a decrease.)
+	Add(float64)
+	// Sub subtracts the given value from the Untyped metric. (The value can
+	// be negative, resulting in an increase.)
+	Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+	return newValue(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+	MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &UntypedVec{
+		MetricVec: MetricVec{
+			children: map[uint64]Metric{},
+			desc:     desc,
+			hash:     fnv.New64a(),
+			newMetric: func(lvs ...string) Metric {
+				return newValue(desc, UntypedValue, 0, lvs...)
+			},
+		},
+	}
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+	metric, err := m.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Untyped), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error.
+// By not returning an error, WithLabelValues allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+	return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 00000000..b54ac11e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+	"sync/atomic"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	SelfCollector
+
+	desc       *Desc
+	valType    ValueType
+	labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value, and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+	if len(labelValues) != len(desc.variableLabels) {
+		panic(errInconsistentCardinality)
+	}
+	result := &value{
+		desc:       desc,
+		valType:    valueType,
+		valBits:    math.Float64bits(val),
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}
+	result.Init(result)
+	return result
+}
+
+func (v *value) Desc() *Desc {
+	return v.desc
+}
+
+func (v *value) Set(val float64) {
+	atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+	v.Add(1)
+}
+
+func (v *value) Dec() {
+	v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+	for {
+		oldBits := atomic.LoadUint64(&v.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (v *value) Sub(val float64) {
+	v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+	return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved at collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+	SelfCollector
+
+	desc       *Desc
+	valType    ValueType
+	function   func() float64
+	labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+	result := &valueFunc{
+		desc:       desc,
+		valType:    valueType,
+		function:   function,
+		labelPairs: makeLabelPairs(desc, nil),
+	}
+	result.Init(result)
+	return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+	return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+	return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
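+//
+// A hedged sketch of typical use inside a custom Collector's Collect method
+// (desc and ch are assumed to be in scope; they are not part of this file):
+//
+//	ch <- prometheus.MustNewConstMetric(
+//		desc, prometheus.GaugeValue, 42, "someLabelValue",
+//	)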
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 00000000..a1f3bdf3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,247 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "hash" + "sync" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects not only children, but also hash and buf. 
+ children map[uint64]Metric + desc *Desc + + // hash is our own hash instance to avoid repeated allocations. + hash hash.Hash64 + // buf is used to copy string contents into it for hashing, + // again to avoid allocations. + buf bytes.Buffer + + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metric := range m.children { + ch <- metric + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + lvs := make([]string, len(labels)) + for i, label := range m.desc.variableLabels { + lvs[i] = labels[label] + } + return m.getOrCreateMetric(h, lvs...), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. 
The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + if _, has := m.children[h]; !has { + return false + } + delete(m.children, h) + return true +} + +// Reset deletes all metrics in this vector. 
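+//
+// An illustrative example (myVec is an assumed vector variable, not part of
+// this file):
+//
+//	myVec.Reset() // drop all cached label combinations, e.g. between tests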
+func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, val := range vals { + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + m.hash.Reset() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + m.buf.Reset() + m.buf.WriteString(val) + m.hash.Write(m.buf.Bytes()) + } + return m.hash.Sum64(), nil +} + +func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { + metric, ok := m.children[hash] + if !ok { + // Copy labelValues. Otherwise, they would be allocated even if we don't go + // down this code path. + copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) + metric = m.newMetric(copiedLabelValues...) + m.children[hash] = metric + } + return metric +} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 00000000..20110e41 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 00000000..b065f868 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile 
`protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + 
return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 00000000..636a2c1a --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 00000000..b72c9bed --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,411 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const (
+		textType = "text/plain"
+		jsonType = "application/json"
+	)
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+
+	case jsonType:
+		var prometheusAPIVersion string
+
+		if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
+			prometheusAPIVersion = params["version"]
+		} else {
+			prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
+		}
+
+		switch prometheusAPIVersion {
+		case "0.0.2", "":
+			return fmtJSON2
+		default:
+			return FmtUnknown
+		}
+	}
+
+	return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+	switch format {
+	case FmtProtoDelim:
+		return &protoDecoder{r: r}
+	case fmtJSON2:
+		return newJSON2Decoder(r)
+	}
+	return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	_, err := pbutil.ReadDelimited(d.r, v)
+	return err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	p    TextParser
+	fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	// TODO(fabxc): Wrap this as a line reader to make streaming safer.
+	if len(d.fams) == 0 {
+		// No cached metric families, read everything and parse metrics.
+		fams, err := d.p.TextToMetricFamilies(d.r)
+		if err != nil {
+			return err
+		}
+		if len(fams) == 0 {
+			return io.EOF
+		}
+		d.fams = make([]*dto.MetricFamily, 0, len(fams))
+		for _, f := range fams {
+			d.fams = append(d.fams, f)
+		}
+	}
+
+	*v = *d.fams[0]
+	d.fams = d.fams[1:]
+
+	return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	if err := sd.Dec.Decode(&sd.f); err != nil {
+		return err
+	}
+	*s = extractSamples(&sd.f, sd.Opts)
+	return nil
+}
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+	var all model.Vector
+	for _, f := range fams {
+		all = append(all, extractSamples(f, o)...)
+	}
+	return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+	switch f.GetType() {
+	case dto.MetricType_COUNTER:
+		return extractCounter(o, f)
+	case dto.MetricType_GAUGE:
+		return extractGauge(o, f)
+	case dto.MetricType_SUMMARY:
+		return extractSummary(o, f)
+	case dto.MetricType_UNTYPED:
+		return extractUntyped(o, f)
+	case dto.MetricType_HISTOGRAM:
+		return extractHistogram(o, f)
+	}
+	panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Counter == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Counter.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Gauge == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Gauge.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Untyped == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Untyped.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractSummary(o *DecodeOptions, f
*dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". + lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 00000000..392ca90e --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "bitbucket.org/ww/goautoneg" + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
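+// Editor's usage sketch (illustrative, not part of the vendored upstream
+// file): a typical HTTP handler pairs Negotiate with NewEncoder. The
+// variables w, req, and families below are hypothetical.
+//
+//	format := expfmt.Negotiate(req.Header)
+//	w.Header().Set("Content-Type", string(format))
+//	enc := expfmt.NewEncoder(w, format)
+//	for _, mf := range families { // families is a []*dto.MetricFamily
+//		if err := enc.Encode(mf); err != nil {
+//			break
+//		}
+//	}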
+func NewEncoder(w io.Writer, format Format) Encoder {
+	switch format {
+	case FmtProtoDelim:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := pbutil.WriteDelimited(w, v)
+			return err
+		})
+	case FmtProtoCompact:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, v.String())
+			return err
+		})
+	case FmtProtoText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+			return err
+		})
+	case FmtText:
+		return encoder(func(v *dto.MetricFamily) error {
+			_, err := MetricFamilyToText(w, v)
+			return err
+		})
+	}
+	panic("expfmt.NewEncoder: unknown format")
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000..366fbde9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+type Format string
+
+const (
+	TextVersion = "0.0.4"
+
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+	// The Content-Type values for the different wire protocols.
+	FmtUnknown      Format = ``
+	FmtText         Format = `text/plain; version=` + TextVersion
+	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
+	FmtProtoText    Format = ProtoFmt + ` encoding=text`
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+	// fmtJSON2 is hidden as it is deprecated.
+	fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+	hdrContentType = "Content-Type"
+	hdrAccept      = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000..14f92014
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//	go-fuzz-build github.com/prometheus/client_golang/text
+//	go-fuzz -bin text-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/json_decode.go b/vendor/github.com/prometheus/common/expfmt/json_decode.go new file mode 100644 index 00000000..67e3a0d4 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/json_decode.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" +) + +type json2Decoder struct { + dec *json.Decoder + fams []*dto.MetricFamily +} + +func newJSON2Decoder(r io.Reader) Decoder { + return &json2Decoder{ + dec: json.NewDecoder(r), + } +} + +type histogram002 struct { + Labels model.LabelSet `json:"labels"` + Values map[string]float64 `json:"value"` +} + +type counter002 struct { + Labels model.LabelSet `json:"labels"` + Value float64 `json:"value"` +} + +func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair { + labels := base.Clone().Merge(ext) + delete(labels, model.MetricNameLabel) + + names := make([]string, 0, len(labels)) + for ln := range labels { + names = append(names, string(ln)) + } + sort.Strings(names) + + pairs := make([]*dto.LabelPair, 0, len(labels)) + + for _, ln := range names { + lv := labels[model.LabelName(ln)] + + pairs = append(pairs, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(string(lv)), + }) + } + + return pairs +} + +func (d *json2Decoder) more() error { + var entities []struct { + BaseLabels model.LabelSet `json:"baseLabels"` + Docstring string `json:"docstring"` + Metric struct { + Type string `json:"type"` + Values json.RawMessage `json:"value"` + } `json:"metric"` + } + + if err := d.dec.Decode(&entities); err != nil { + return err + } + for _, e := range entities { + f := &dto.MetricFamily{ + Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])), + Help: proto.String(e.Docstring), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{}, + } + + d.fams = append(d.fams, f) + + switch e.Metric.Type { + case "counter", "gauge": + var values []counter002 + + if err := json.Unmarshal(e.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err) + } + + for _, ctr := range values { + f.Metric = append(f.Metric, &dto.Metric{ + Label: protoLabelSet(e.BaseLabels, ctr.Labels), + Untyped: &dto.Untyped{ + Value: proto.Float64(ctr.Value), + }, + }) + } + + case "histogram": + var values []histogram002 + + if err := json.Unmarshal(e.Metric.Values, &values); err != nil { + return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err) + } + + for _, hist := range values { + quants := make([]string, 0, len(values)) + for q := range hist.Values { + quants = append(quants, q) + } + + sort.Strings(quants) 
+ + for _, q := range quants { + value := hist.Values[q] + // The correct label is "quantile" but to not break old expressions + // this remains "percentile" + hist.Labels["percentile"] = model.LabelValue(q) + + f.Metric = append(f.Metric, &dto.Metric{ + Label: protoLabelSet(e.BaseLabels, hist.Labels), + Untyped: &dto.Untyped{ + Value: proto.Float64(value), + }, + }) + } + } + + default: + return fmt.Errorf("unknown metric type %q", e.Metric.Type) + } + } + return nil +} + +// Decode implements the Decoder interface. +func (d *json2Decoder) Decode(v *dto.MetricFamily) error { + if len(d.fams) == 0 { + if err := d.more(); err != nil { + return err + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 00000000..0bb9c14c --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,305 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "fmt" + "io" + "math" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. This function does not perform checks on the +// content of the metric and label names, i.e. invalid metric or label names +// will result in invalid text format output. +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. + if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. 
+ for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
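+// For example (illustrative, not from the upstream file), a sample with
+// metric name "http_requests_total", one label pair method="post", no
+// additional label, value 1027, and a millisecond timestamp is rendered as:
+//
+//	http_requests_total{method="post"} 1027 1395066363000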
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. +func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + result := bytes.NewBuffer(make([]byte, 0, len(v))) + for _, c := range v { + switch { + case c == '\\': + result.WriteString(`\\`) + case includeDoubleQuote && c == '"': + result.WriteString(`\"`) + case c == '\n': + result.WriteString(`\n`) + default: + result.WriteRune(c) + } + } + return result.String() +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 00000000..84433bc4 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,746 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
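+
+// Editor's sketch (illustrative, not part of the vendored file): from a
+// consumer's point of view, the parser defined below is typically driven
+// like this, assuming a string variable input holding text-format metrics:
+//
+//	var p expfmt.TextParser
+//	fams, err := p.TextToMetricFamilies(strings.NewReader(input))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for name, mf := range fams {
+//		fmt.Println(name, mf.GetType())
+//	}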
+ +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// nil value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. 
If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. 
+func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. 
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. +func (p *TextParser) readingHelp() stateFn { + if p.currentMF.Help != nil { + p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) + return nil + } + // Rest of line is the docstring. + if p.readTokenUntilNewline(true); p.err != nil { + return nil // Unexpected end of input. + } + p.currentMF.Help = proto.String(p.currentToken.String()) + return p.startOfLine +} + +// readingType represents the state where the last byte read (now in +// p.currentByte) is the first byte of the type hint after 'HELP'. 
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
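+// Editor's note (illustrative, not part of the vendored file): per the
+// isValidMetricNameStart and isValidMetricNameContinuation helpers below,
+// accepted names match [a-zA-Z_:][a-zA-Z0-9_:]*, e.g. "http_requests_total"
+// or "api:request_rate5m".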
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 00000000..b027e9f3 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,109 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. 
+	StartsAt time.Time `json:"startsAt,omitempty"`
+	EndsAt   time.Time `json:"endsAt,omitempty"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(time.Now())
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+	if as.HasFiring() {
+		return AlertFiring
+	}
+	return AlertResolved
+}
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 00000000..fc4de410
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return 0, err
+	}
+	return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+	return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+	return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+	return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for k := range s {
+		if _, ok := o[k]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..6459c8f7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,188 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelNameRE.MatchString(s) {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelNameRE.MatchString(s) {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
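+// Pairs are ordered by name first and by value second (see Less below).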
+type LabelPairs []*LabelPair

+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+	switch {
+	case l[i].Name > l[j].Name:
+		return false
+	case l[i].Name < l[j].Name:
+		return true
+	case l[i].Value > l[j].Value:
+		return false
+	case l[i].Value < l[j].Value:
+		return true
+	default:
+		return false
+	}
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 00000000..142b9d1e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+func (ls LabelSet) Equal(o LabelSet) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for ln, lv := range ls {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+	if len(ls) < len(o) {
+		return true
+	}
+	if len(ls) > len(o) {
+		return false
+	}
+
+	lns := make(LabelNames, 0, len(ls)+len(o))
+	for ln := range ls {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := ls[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
+}
+
+func (ls LabelSet) Clone() LabelSet {
+	lsn := make(LabelSet, len(ls))
+	for ln, lv := range ls {
+		lsn[ln] = lv
+	}
+	return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
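+// On conflicting label names the value from other wins; neither input set
+// is modified.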
+func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !LabelNameRE.MatchString(string(ln)) { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 00000000..25fc3c94 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,81 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "sort" + "strings" +) + +var separator = []byte{0} + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric {
+	clone := Metric{}
+	for k, v := range m {
+		clone[k] = v
+	}
+	return clone
+}
+
+func (m Metric) String() string {
+	metricName, hasName := m[MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+		}
+	}
+
+	switch numLabels {
+	case 0:
+		if hasName {
+			return string(metricName)
+		}
+		return "{}"
+	default:
+		sort.Strings(labelStrings)
+		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+	}
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+	return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+	return LabelSet(m).FastFingerprint()
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 00000000..88f013a4
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 00000000..28f37006
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"bytes"
+	"hash"
+	"hash/fnv"
+	"sort"
+	"sync"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+	// cache the signature of an empty label set.
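+	// (With nothing written to the hash, this is simply the FNV-1a
+	// 64-bit offset basis.)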
+	emptyLabelSignature = fnv.New64a().Sum64()
+
+	hashAndBufPool sync.Pool
+)
+
+type hashAndBuf struct {
+	h hash.Hash64
+	b bytes.Buffer
+}
+
+func getHashAndBuf() *hashAndBuf {
+	hb := hashAndBufPool.Get()
+	if hb == nil {
+		return &hashAndBuf{h: fnv.New64a()}
+	}
+	return hb.(*hashAndBuf)
+}
+
+func putHashAndBuf(hb *hashAndBuf) {
+	hb.h.Reset()
+	hb.b.Reset()
+	hashAndBufPool.Put(hb)
+}
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for _, labelName := range labelNames {
+		hb.b.WriteString(labelName)
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(labels[labelName])
+		hb.b.WriteByte(SeparatorByte)
+		hb.h.Write(hb.b.Bytes())
+		hb.b.Reset()
+	}
+	return hb.h.Sum64()
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for _, labelName := range labelNames {
+		hb.b.WriteString(string(labelName))
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(string(ls[labelName]))
+		hb.b.WriteByte(SeparatorByte)
+		hb.h.Write(hb.b.Bytes())
+		hb.b.Reset()
+	}
+	return Fingerprint(hb.h.Sum64())
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more prone to hash
+// collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for labelName, labelValue := range ls {
+		hb.b.WriteString(string(labelName))
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(string(labelValue))
+		hb.h.Write(hb.b.Bytes())
+		result ^= hb.h.Sum64()
+		hb.h.Reset()
+		hb.b.Reset()
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
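+// For example, for a hypothetical metric m, SignatureForLabels(m, "a", "b")
+// and SignatureForLabels(m, "b", "a") therefore yield the same signature.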
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(m) == 0 || len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for _, label := range labels {
+		hb.b.WriteString(string(label))
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(string(m[label]))
+		hb.b.WriteByte(SeparatorByte)
+		hb.h.Write(hb.b.Bytes())
+		hb.b.Reset()
+	}
+	return hb.h.Sum64()
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	hb := getHashAndBuf()
+	defer putHashAndBuf(hb)
+
+	for _, labelName := range labelNames {
+		hb.b.WriteString(string(labelName))
+		hb.b.WriteByte(SeparatorByte)
+		hb.b.WriteString(string(m[labelName]))
+		hb.b.WriteByte(SeparatorByte)
+		hb.h.Write(hb.b.Bytes())
+		hb.b.Reset()
+	}
+	return hb.h.Sum64()
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..b4b96eae
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes how to match the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..ebc8bf6c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,230 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second (and evenly divide it) for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
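+// For example, Time(1000), i.e. one second past the epoch at the millisecond
+// tick used here, yields 1.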
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] = p[1] + strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		*t = Time(v + va)
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// ParseDuration parses a string into a Duration, assuming that a day always
+// has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+	matches := durationRE.FindStringSubmatch(durationStr)
+	if len(matches) != 3 {
+		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+	}
+	durSeconds, _ := strconv.Atoi(matches[1])
+	dur := time.Duration(durSeconds) * time.Second
+	unit := matches[2]
+	switch unit {
+	case "d":
+		dur *= 60 * 60 * 24
+	case "h":
+		dur *= 60 * 60
+	case "m":
+		dur *= 60
+	case "s":
+		dur *= 1
+	default:
+		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+	}
+	return Duration(dur), nil
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
+
+func (d Duration) String() string {
+	seconds := int64(time.Duration(d) / time.Second)
+	factors := map[string]int64{
+		"d": 60 * 60 * 24,
+		"h": 60 * 60,
+		"m": 60,
+		"s": 1,
+	}
+	unit := "s"
+	switch int64(0) {
+	case seconds % factors["d"]:
+		unit = "d"
+	case seconds % factors["h"]:
+		unit = "h"
+	case seconds % factors["m"]:
+		unit = "m"
+	}
+	return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
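+// For example, a YAML value of "30s" unmarshals into Duration(30 * time.Second)
+// and "1d" into Duration(24 * time.Hour).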
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 00000000..10ffb0bd --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,395 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +func (v SampleValue) Equal(o SampleValue) bool { + return v == o +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. +type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. 
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+	if s.Value != o.Value {
+		return false
+	}
+
+	return true
+}
+
+func (s Sample) String() string {
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	s.Timestamp = v.Value.Timestamp
+	s.Value = v.Value.Value
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
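+// It sorts by the label sets of its series (see Less below).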
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 00000000..53c5e9aa --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 00000000..e2acd6d4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. 
+//
+// Example:
+//
+//    package main
+//
+//    import (
+//    	"fmt"
+//    	"log"
+//
+//    	"github.com/prometheus/procfs"
+//    )
+//
+//    func main() {
+//    	p, err := procfs.Self()
+//    	if err != nil {
+//    		log.Fatalf("could not get process: %s", err)
+//    	}
+//
+//    	stat, err := p.NewStat()
+//    	if err != nil {
+//    		log.Fatalf("could not get process stat: %s", err)
+//    	}
+//
+//    	fmt.Printf("command: %s\n", stat.Comm)
+//    	fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+//    	fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+//    	fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+//    }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 00000000..6a8d97b1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,40 @@
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+func (fs FS) stat(p string) (os.FileInfo, error) {
+	return os.Stat(path.Join(string(fs), p))
+}
+
+func (fs FS) open(p string) (*os.File, error) {
+	return os.Open(path.Join(string(fs), p))
+}
+
+func (fs FS) readlink(p string) (string, error) {
+	return os.Readlink(path.Join(string(fs), p))
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..26da5000
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,223 @@
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"strconv"
+	"strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The remote (real) port.
+	RemotePort uint16
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
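+// The kernel exposes the counters in ip_vs_stats in hexadecimal, hence the
+// base-16 ParseUint calls in the parser below.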
+func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := fs.open("net/ip_vs_stats") + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
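+//
+// An illustrative sketch of consuming the result, assuming fs was obtained
+// from NewFS:
+//
+//	backends, err := fs.NewIPVSBackendStatus()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, b := range backends {
+//		fmt.Printf("%s %s:%d -> %s:%d weight=%d active=%d inactive=%d\n",
+//			b.Proto, b.LocalAddress, b.LocalPort,
+//			b.RemoteAddress, b.RemotePort,
+//			b.Weight, b.ActiveConn, b.InactConn)
+//	}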
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := fs.open("net/ip_vs") + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(string(scanner.Text())) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + tmp := strings.SplitN(s, ":", 2) + + if len(tmp) != 2 { + return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) + } + + if len(tmp[0]) != 8 && len(tmp[0]) != 32 { + return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) + } + + ip, err := hex.DecodeString(tmp[0]) + if err != nil { + return nil, 0, err + } + + port, err := strconv.ParseUint(tmp[1], 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 00000000..09ed6b5e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,158 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "path" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
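+//
+// An illustrative sketch, e.g. for reporting resync progress (assumes the
+// default /proc mount):
+//
+//	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	mds, err := fs.ParseMDStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, md := range mds {
+//		fmt.Printf("%s (%s): %d/%d blocks synced\n",
+//			md.Name, md.ActivityState, md.BlocksSynced, md.BlocksTotal)
+//	}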
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := path.Join(string(fs), "mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStatusFile := string(content) + + lines := strings.Split(mdStatusFile, "\n") + var currentMD string + + // Each md has at least the deviceline, statusline and one empty line afterwards + // so we will have probably something of the order len(lines)/3 devices + // so we use that for preallocation. + estimateMDs := len(lines) / 3 + mdStates := make([]MDStat, 0, estimateMDs) + + for i, l := range lines { + if l == "" { + // Skip entirely empty lines. + continue + } + + if l[0] == ' ' { + // Those lines are not the beginning of a md-section. + continue + } + + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + // We aren't interested in lines with general info. + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + currentMD = mainLine[0] // name of md-device + activityState := mainLine[2] // activity status of said md-device + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD) + } + + active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // + // Now get the number of synced blocks. + // + + // Get the line number of the syncing-line. + var j int + if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line + j = i + 3 + } else { + j = i + 2 + } + + // If device is syncing at the moment, get the number of currently synced bytes, + // otherwise that number equals the size of the device. + syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks}) + + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + + // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. + if len(matches) != 3+1 { + return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline) + } + + return active, total, size, nil +} + +// Gets the size that has already been synced out of the sync-line. +func evalBuildline(buildline string) (int64, error) { + matches := buildlineRE.FindStringSubmatch(buildline) + + // +1 to make it more obvious that the whole string containing the info is also returned as matches[0]. 
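+	// A sync line in /proc/mdstat looks roughly like the following
+	// (values are illustrative):
+	//	[=====>...........]  recovery = 25.0% (104857600/419430400) finish=...
+	// buildlineRE captures the first number of the "(synced/total)" pair.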
+ if len(matches) < 1+1 { + return 0, fmt.Errorf("too few matches found in buildline: %s", buildline) + } + + if len(matches) > 1+1 { + return 0, fmt.Errorf("too many matches found in buildline: %s", buildline) + } + + syncedSize, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedSize, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 00000000..efc85027 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,202 @@ +package procfs + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently avaible processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := fs.readlink("self") + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. +func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := fs.stat(strconv.Itoa(pid)); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently avaible processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := fs.open("") + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := p.open("cmdline") + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := p.readlink("exe") + + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
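+//
+// A small illustrative sketch, assuming the default /proc mount:
+//
+//	p, err := procfs.Self()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fds, err := p.FileDescriptors()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("open file descriptors: %d\n", len(fds))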
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := p.readlink("fd/" + name) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := p.open("fd") + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) open(pa string) (*os.File, error) { + return p.fs.open(path.Join(strconv.Itoa(p.PID), pa)) +} + +func (p Proc) readlink(pa string) (string, error) { + return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa)) +} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 00000000..7c6dc869 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,54 @@ +package procfs + +import ( + "fmt" + "io/ioutil" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. +func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := p.open("io") + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + if err != nil { + return pio, err + } + + return pio, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 00000000..9f080b9f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,111 @@ +package procfs + +import ( + "bufio" + "fmt" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. 
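+// Values reported by the kernel as "unlimited" are stored as -1 (see
+// parseInt below). An illustrative sketch, assuming p is a Proc:
+//
+//	l, err := p.NewLimits()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if l.OpenFiles != -1 {
+//		fmt.Printf("max open files: %d\n", l.OpenFiles)
+//	}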
+type ProcLimits struct {
+	CPUTime          int
+	FileSize         int
+	DataSize         int
+	StackSize        int
+	CoreFileSize     int
+	ResidentSet      int
+	Processes        int
+	OpenFiles        int
+	LockedMemory     int
+	AddressSpace     int
+	FileLocks        int
+	PendingSignals   int
+	MsqqueueSize     int
+	NicePriority     int
+	RealtimePriority int
+	RealtimeTimeout  int
+}
+
+const (
+	limitsFields    = 3
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	f, err := p.open("limits")
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		fields := limitsDelimiter.Split(s.Text(), limitsFields)
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf(
+				"couldn't parse %s line %s", f.Name(), s.Text())
+		}
+
+		switch fields[0] {
+		case "Max cpu time":
+			l.CPUTime, err = parseInt(fields[1])
+		case "Max file size":
+			l.FileSize, err = parseInt(fields[1])
+		case "Max data size":
+			l.DataSize, err = parseInt(fields[1])
+		case "Max stack size":
+			l.StackSize, err = parseInt(fields[1])
+		case "Max core file size":
+			l.CoreFileSize, err = parseInt(fields[1])
+		case "Max resident set":
+			l.ResidentSet, err = parseInt(fields[1])
+		case "Max processes":
+			l.Processes, err = parseInt(fields[1])
+		case "Max open files":
+			l.OpenFiles, err = parseInt(fields[1])
+		case "Max locked memory":
+			l.LockedMemory, err = parseInt(fields[1])
+		case "Max address space":
+			l.AddressSpace, err = parseInt(fields[1])
+		case "Max file locks":
+			l.FileLocks, err = parseInt(fields[1])
+		case "Max pending signals":
+			l.PendingSignals, err = parseInt(fields[1])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseInt(fields[1])
+		case "Max nice priority":
+			l.NicePriority, err = parseInt(fields[1])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseInt(fields[1])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseInt(fields[1])
+		}
+
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+	if s == limitsUnlimited {
+		return -1, nil
+	}
+	i, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+	}
+	return int(i), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 00000000..30a403b6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
+// required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.
+// After much research it was determined that USER_HZ is actually hardcoded to
+// 100 on all Go-supported platforms as of the time of this writing. This is
+// why we decided to hardcode it here as well. It is not impossible that there
+// could be systems with exceptions, but they should be very exotic edge cases,
+// and in that case, the worst outcome will be two misreported metrics.
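+//
+// As a concrete example of the conversion this constant is used for: a
+// process that has accumulated 250 clock ticks of CPU time has run for
+// 250/userHZ = 2.5 seconds.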
+// +// See also the following discussions: +// +// - https://github.com/prometheus/node_exporter/issues/52 +// - https://github.com/prometheus/procfs/pull/2 +// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue +const userHZ = 100 + +// ProcStat provides status information about the process, +// read from /proc/[pid]/stat. +type ProcStat struct { + // The process ID. + PID int + // The filename of the executable. + Comm string + // The process state. + State string + // The PID of the parent of this process. + PPID int + // The process group ID of the process. + PGRP int + // The session ID of the process. + Session int + // The controlling terminal of the process. + TTY int + // The ID of the foreground process group of the controlling terminal of + // the process. + TPGID int + // The kernel flags word of the process. + Flags uint + // The number of minor faults the process has made which have not required + // loading a memory page from disk. + MinFlt uint + // The number of minor faults that the process's waited-for children have + // made. + CMinFlt uint + // The number of major faults the process has made which have required + // loading a memory page from disk. + MajFlt uint + // The number of major faults that the process's waited-for children have + // made. + CMajFlt uint + // Amount of time that this process has been scheduled in user mode, + // measured in clock ticks. + UTime uint + // Amount of time that this process has been scheduled in kernel mode, + // measured in clock ticks. + STime uint + // Amount of time that this process's waited-for children have been + // scheduled in user mode, measured in clock ticks. + CUTime uint + // Amount of time that this process's waited-for children have been + // scheduled in kernel mode, measured in clock ticks. + CSTime uint + // For processes running a real-time scheduling policy, this is the negated + // scheduling priority, minus one. + Priority int + // The nice value, a value in the range 19 (low priority) to -20 (high + // priority). + Nice int + // Number of threads in this process. + NumThreads int + // The time the process started after system boot, the value is expressed + // in clock ticks. + Starttime uint64 + // Virtual memory size in bytes. + VSize int + // Resident set size in pages. + RSS int + + fs FS +} + +// NewStat returns the current status information of the process. +func (p Proc) NewStat() (ProcStat, error) { + f, err := p.open("stat") + if err != nil { + return ProcStat{}, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return ProcStat{}, err + } + + var ( + ignore int + + s = ProcStat{PID: p.PID, fs: p.fs} + l = bytes.Index(data, []byte("(")) + r = bytes.LastIndex(data, []byte(")")) + ) + + if l < 0 || r < 0 { + return ProcStat{}, fmt.Errorf( + "unexpected format, couldn't extract comm: %s", + data, + ) + } + + s.Comm = string(data[l+1 : r]) + _, err = fmt.Fscan( + bytes.NewBuffer(data[r+2:]), + &s.State, + &s.PPID, + &s.PGRP, + &s.Session, + &s.TTY, + &s.TPGID, + &s.Flags, + &s.MinFlt, + &s.CMinFlt, + &s.MajFlt, + &s.CMajFlt, + &s.UTime, + &s.STime, + &s.CUTime, + &s.CSTime, + &s.Priority, + &s.Nice, + &s.NumThreads, + &ignore, + &s.Starttime, + &s.VSize, + &s.RSS, + ) + if err != nil { + return ProcStat{}, err + } + + return s, nil +} + +// VirtualMemory returns the virtual memory size in bytes. +func (s ProcStat) VirtualMemory() int { + return s.VSize +} + +// ResidentMemory returns the resident memory size in bytes. 
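+// The value is derived from the page-granular RSS field; for example
+// (illustrative), 1024 resident pages with a 4096-byte page size yield
+// 4 MiB.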
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp (in seconds) at which the process
+// started.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.fs.NewStat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 00000000..26fefb0f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+
+	return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+	f, err := fs.open("stat")
+	if err != nil {
+		return Stat{}, err
+	}
+	defer f.Close()
+
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		line := s.Text()
+		if !strings.HasPrefix(line, "btime") {
+			continue
+		}
+		fields := strings.Fields(line)
+		if len(fields) != 2 {
+			return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+		}
+		i, err := strconv.ParseInt(fields[1], 10, 32)
+		if err != nil {
+			return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+		}
+		return Stat{BootTime: i}, nil
+	}
+	if err := s.Err(); err != nil {
+		return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+	}
+
+	return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 00000000..2885af36
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 00000000..b5b08411 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1398 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + + "github.com/shurcooL/sanitized_anchor_name" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+ // ... + //
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCode(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
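+		// <hr> tag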
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHtmlComment(out, data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (p *parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (p *parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) { + i, size := 0, 0 + skip = 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data) { + return + } + + // check for the marker characters: ~ or ` + if data[i] != '~' && data[i] != '`' { + return + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + if i >= len(data) { + return + } + + // the marker char must occur at least 3 times + if size < 3 { + return + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return + } + + if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + return + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isspace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isspace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < len(data) && !isspace(data[i]) { + syn++ + i++ + } + } + + language := string(data[syntaxStart : syntaxStart+syn]) + *syntax = &language + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + return + } + + skip = i + 1 + return +} + +func (p *parser) fencedCode(out 
*bytes.Buffer, data []byte, doRender bool) int { + var lang *string + beg, marker := p.isFencedCode(data, &lang, "") + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? + if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + syntax := "" + if lang != nil { + syntax = *lang + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), syntax) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCode(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
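+				// peek ahead: scan to the end of the current line, skip any
+				// blank lines, and check whether the upcoming content starts
+				// another ": definition" entry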
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // if this line was preceeded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
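+			// (a line after the blank that starts with ':' turns the
+			// preceding paragraph into a definition-list term)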
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCode(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 00000000..74e67ee8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,949 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded