From 1a1c2079dc6ad09099dbb23cf09214bf441163dc Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 10 May 2018 11:09:51 -0700 Subject: [PATCH] vendor: use latest "etcd'" Signed-off-by: Gyuho Lee --- Gopkg.lock | 39 +- Gopkg.toml | 2 +- .../{internal => }/auth/authpb/auth.pb.go | 0 .../coreos/etcd/{internal => }/auth/doc.go | 0 vendor/github.com/coreos/etcd/auth/jwt.go | 230 + vendor/github.com/coreos/etcd/auth/nop.go | 35 + .../{internal => }/auth/range_perm_cache.go | 46 +- .../etcd/{internal => }/auth/simple_token.go | 28 +- .../coreos/etcd/{internal => }/auth/store.go | 481 +- .../github.com/coreos/etcd/client/client.go | 2 +- .../coreos/etcd/client/keys.generated.go | 3893 ++++++----------- .../github.com/coreos/etcd/clientv3/auth.go | 38 +- .../coreos/etcd/clientv3/balancer/doc.go | 16 + .../grpc1.7-health.go} | 164 +- .../github.com/coreos/etcd/clientv3/client.go | 42 +- .../github.com/coreos/etcd/clientv3/logger.go | 70 +- .../clientv3/{grpc_options.go => options.go} | 3 + .../github.com/coreos/etcd/clientv3/retry.go | 16 +- .../github.com/coreos/etcd/clientv3/watch.go | 2 +- .../coreos/etcd/etcdserver/api/capability.go | 15 +- .../etcd/etcdserver/api/v3rpc/header.go | 6 +- .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 31 +- .../etcd/etcdserver/api/v3rpc/maintenance.go | 67 +- .../coreos/etcd/etcdserver/api/v3rpc/quota.go | 4 +- .../etcdserver/api/v3rpc/rpctypes/error.go | 15 +- .../api/v3rpc/rpctypes/metadatafields.go | 20 + .../coreos/etcd/etcdserver/api/v3rpc/util.go | 11 +- .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 85 +- .../coreos/etcd/etcdserver/apply.go | 59 +- .../coreos/etcd/etcdserver/apply_auth.go | 6 +- .../coreos/etcd/etcdserver/apply_v2.go | 27 +- .../coreos/etcd/etcdserver/backend.go | 40 +- .../coreos/etcd/etcdserver/cluster_util.go | 168 +- .../coreos/etcd/etcdserver/config.go | 161 +- .../coreos/etcd/etcdserver/corrupt.go | 231 +- .../etcdserverpb/raft_internal_stringer.go | 58 + .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 574 +-- 
.../coreos/etcd/etcdserver/metrics.go | 9 +- .../coreos/etcd/etcdserver/quota.go | 49 +- .../github.com/coreos/etcd/etcdserver/raft.go | 242 +- .../coreos/etcd/etcdserver/server.go | 1044 ++++- .../etcd/etcdserver/server_access_control.go | 65 + .../coreos/etcd/etcdserver/snapshot_merge.go | 45 +- .../coreos/etcd/etcdserver/storage.go | 34 +- .../github.com/coreos/etcd/etcdserver/util.go | 29 + .../coreos/etcd/etcdserver/v2_server.go | 15 +- .../coreos/etcd/etcdserver/v3_server.go | 66 +- .../coreos/etcd/internal/auth/jwt.go | 139 - .../coreos/etcd/{internal => }/mvcc/doc.go | 0 .../coreos/etcd/{internal => }/mvcc/index.go | 71 +- .../etcd/{internal => }/mvcc/key_index.go | 72 +- .../coreos/etcd/{internal => }/mvcc/kv.go | 6 +- .../etcd/{internal => }/mvcc/kv_view.go | 2 +- .../etcd/{internal => }/mvcc/kvstore.go | 99 +- .../{internal => }/mvcc/kvstore_compaction.go | 12 +- .../etcd/{internal => }/mvcc/kvstore_txn.go | 96 +- .../etcd/{internal => }/mvcc/metrics.go | 19 +- .../etcd/{internal => }/mvcc/metrics_txn.go | 4 +- .../etcd/{internal => }/mvcc/mvccpb/kv.pb.go | 0 .../etcd/{internal => }/mvcc/revision.go | 0 .../coreos/etcd/{internal => }/mvcc/util.go | 7 +- .../{internal => }/mvcc/watchable_store.go | 42 +- .../mvcc/watchable_store_txn.go | 2 +- .../etcd/{internal => }/mvcc/watcher.go | 2 +- .../etcd/{internal => }/mvcc/watcher_group.go | 2 +- .../coreos/etcd/pkg/logutil/discard_logger.go | 46 + .../github.com/coreos/etcd/pkg/logutil/doc.go | 16 + .../coreos/etcd/pkg/logutil/logger.go | 64 + .../coreos/etcd/pkg/logutil/merge_logger.go | 194 + .../coreos/etcd/pkg/logutil/package_logger.go | 60 + .../coreos/etcd/pkg/logutil/zap_grpc.go | 111 + .../coreos/etcd/pkg/logutil/zap_journald.go | 89 + .../coreos/etcd/pkg/logutil/zap_raft.go | 97 + .../github.com/coreos/etcd/pkg/netutil/doc.go | 16 + .../coreos/etcd/pkg/netutil/netutil.go | 64 +- .../coreos/etcd/pkg/report/timeseries.go | 2 +- vendor/github.com/coreos/etcd/pkg/types/id.go | 4 +- 
vendor/github.com/coreos/etcd/raft/doc.go | 300 ++ vendor/github.com/coreos/etcd/raft/log.go | 358 ++ .../coreos/etcd/raft/log_unstable.go | 159 + vendor/github.com/coreos/etcd/raft/logger.go | 126 + vendor/github.com/coreos/etcd/raft/node.go | 582 +++ .../github.com/coreos/etcd/raft/progress.go | 284 ++ vendor/github.com/coreos/etcd/raft/raft.go | 1450 ++++++ .../coreos/etcd/raft/raftpb/raft.pb.go | 2004 +++++++++ vendor/github.com/coreos/etcd/raft/rawnode.go | 264 ++ .../github.com/coreos/etcd/raft/read_only.go | 118 + vendor/github.com/coreos/etcd/raft/status.go | 88 + vendor/github.com/coreos/etcd/raft/storage.go | 271 ++ vendor/github.com/coreos/etcd/raft/util.go | 129 + vendor/go.uber.org/atomic/LICENSE.txt | 19 + vendor/go.uber.org/atomic/atomic.go | 351 ++ vendor/go.uber.org/atomic/string.go | 49 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/error.go | 401 ++ vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/array.go | 320 ++ vendor/go.uber.org/zap/buffer/buffer.go | 106 + vendor/go.uber.org/zap/buffer/pool.go | 49 + vendor/go.uber.org/zap/config.go | 243 + vendor/go.uber.org/zap/doc.go | 113 + vendor/go.uber.org/zap/encoder.go | 75 + vendor/go.uber.org/zap/error.go | 80 + vendor/go.uber.org/zap/field.go | 310 ++ vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/global.go | 169 + vendor/go.uber.org/zap/http_handler.go | 81 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 64 + vendor/go.uber.org/zap/level.go | 132 + vendor/go.uber.org/zap/logger.go | 305 ++ vendor/go.uber.org/zap/options.go | 109 + vendor/go.uber.org/zap/stacktrace.go | 126 + vendor/go.uber.org/zap/sugar.go | 304 ++ vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 96 + .../zap/zapcore/console_encoder.go | 147 + vendor/go.uber.org/zap/zapcore/core.go | 113 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + 
vendor/go.uber.org/zap/zapcore/encoder.go | 348 ++ vendor/go.uber.org/zap/zapcore/entry.go | 257 ++ vendor/go.uber.org/zap/zapcore/error.go | 120 + vendor/go.uber.org/zap/zapcore/field.go | 201 + vendor/go.uber.org/zap/zapcore/hook.go | 68 + .../go.uber.org/zap/zapcore/json_encoder.go | 480 ++ vendor/go.uber.org/zap/zapcore/level.go | 175 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 53 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 + vendor/go.uber.org/zap/zapcore/sampler.go | 134 + vendor/go.uber.org/zap/zapcore/tee.go | 81 + .../go.uber.org/zap/zapcore/write_syncer.go | 123 + 133 files changed, 18230 insertions(+), 3754 deletions(-) rename vendor/github.com/coreos/etcd/{internal => }/auth/authpb/auth.pb.go (100%) rename vendor/github.com/coreos/etcd/{internal => }/auth/doc.go (100%) create mode 100644 vendor/github.com/coreos/etcd/auth/jwt.go create mode 100644 vendor/github.com/coreos/etcd/auth/nop.go rename vendor/github.com/coreos/etcd/{internal => }/auth/range_perm_cache.go (68%) rename vendor/github.com/coreos/etcd/{internal => }/auth/simple_token.go (88%) rename vendor/github.com/coreos/etcd/{internal => }/auth/store.go (67%) create mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer/doc.go rename vendor/github.com/coreos/etcd/clientv3/{health_balancer.go => balancer/grpc1.7-health.go} (72%) rename vendor/github.com/coreos/etcd/clientv3/{grpc_options.go => options.go} (96%) create mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go create mode 100644 vendor/github.com/coreos/etcd/etcdserver/server_access_control.go delete mode 100644 vendor/github.com/coreos/etcd/internal/auth/jwt.go rename vendor/github.com/coreos/etcd/{internal => }/mvcc/doc.go (100%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/index.go (81%) rename 
vendor/github.com/coreos/etcd/{internal => }/mvcc/key_index.go (80%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/kv.go (97%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/kv_view.go (96%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/kvstore.go (82%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/kvstore_compaction.go (85%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/kvstore_txn.go (72%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/metrics.go (88%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/metrics_txn.go (96%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/mvccpb/kv.pb.go (100%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/revision.go (100%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/util.go (90%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/watchable_store.go (91%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/watchable_store_txn.go (96%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/watcher.go (99%) rename vendor/github.com/coreos/etcd/{internal => }/mvcc/watcher_group.go (99%) create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/doc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/logger.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/zap_journald.go create mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go create mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/doc.go create mode 100644 vendor/github.com/coreos/etcd/raft/doc.go create mode 100644 vendor/github.com/coreos/etcd/raft/log.go create mode 100644 
vendor/github.com/coreos/etcd/raft/log_unstable.go create mode 100644 vendor/github.com/coreos/etcd/raft/logger.go create mode 100644 vendor/github.com/coreos/etcd/raft/node.go create mode 100644 vendor/github.com/coreos/etcd/raft/progress.go create mode 100644 vendor/github.com/coreos/etcd/raft/raft.go create mode 100644 vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go create mode 100644 vendor/github.com/coreos/etcd/raft/rawnode.go create mode 100644 vendor/github.com/coreos/etcd/raft/read_only.go create mode 100644 vendor/github.com/coreos/etcd/raft/status.go create mode 100644 vendor/github.com/coreos/etcd/raft/storage.go create mode 100644 vendor/github.com/coreos/etcd/raft/util.go create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt create mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 
vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go diff --git a/Gopkg.lock b/Gopkg.lock index d22be56d..c8be89c9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -35,17 +35,21 @@ [[projects]] name = "github.com/coreos/etcd" packages = [ + "auth/authpb", "clientv3", + "clientv3/balancer", "etcdserver/api/v3rpc/rpctypes", "etcdserver/etcdserverpb", - "internal/auth/authpb", - "internal/mvcc/mvccpb", + "mvcc/mvccpb", "pkg/cpuutil", + "pkg/logutil", "pkg/netutil", "pkg/report", - "pkg/types" + "pkg/types", + "raft", + "raft/raftpb" ] - revision = "1d99d3886f6cb5fb7ef13100c9587cc01820d38e" + revision = "67b1ff6724637f0a00f693471ddb17b5adde38cf" source = "https://github.com/coreos/etcd" [[projects]] @@ -206,6 +210,31 @@ revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" version = "v1.0.0" +[[projects]] + name = "go.uber.org/atomic" + packages = ["."] + revision = 
"1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" + version = "v1.3.2" + +[[projects]] + name = "go.uber.org/multierr" + packages = ["."] + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore" + ] + revision = "eeedf312bc6c57391d84767a4cd413f02a917974" + version = "v1.8.0" + [[projects]] branch = "master" name = "golang.org/x/image" @@ -394,6 +423,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "69cdc0d9c700a95cfecff8f95f783cc922720cd16666ffe7279cb702a812fa58" + inputs-digest = "be4c2e85b5fe6d0b1af6016e555dcbaa6c76b9f4d5744747a1e2984512298b3a" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 40b75b3b..cc7e1ffe 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -6,7 +6,7 @@ [[constraint]] name = "github.com/coreos/etcd" source = "https://github.com/coreos/etcd" - revision = "1d99d3886f6cb5fb7ef13100c9587cc01820d38e" + revision = "67b1ff6724637f0a00f693471ddb17b5adde38cf" # v1.7.5 [[constraint]] diff --git a/vendor/github.com/coreos/etcd/internal/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go similarity index 100% rename from vendor/github.com/coreos/etcd/internal/auth/authpb/auth.pb.go rename to vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go diff --git a/vendor/github.com/coreos/etcd/internal/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/internal/auth/doc.go rename to vendor/github.com/coreos/etcd/auth/doc.go diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go new file mode 100644 index 00000000..6e4c835e --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/jwt.go @@ -0,0 +1,230 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "crypto/rsa" + "io/ioutil" + "time" + + jwt "github.com/dgrijalva/jwt-go" + "go.uber.org/zap" +) + +type tokenJWT struct { + lg *zap.Logger + signMethod string + signKey *rsa.PrivateKey + verifyKey *rsa.PublicKey + ttl time.Duration +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return t.verifyKey, nil + }) + + switch err.(type) { + case nil: + if !parsed.Valid { + if t.lg != nil { + t.lg.Warn("invalid JWT token", zap.String("token", token)) + } else { + plog.Warningf("invalid jwt token: %s", token) + } + return nil, false + } + + claims := parsed.Claims.(jwt.MapClaims) + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + default: + if t.lg != nil { + t.lg.Warn( + "failed to parse a JWT token", + zap.String("token", token), + zap.Error(err), + ) + } else { + plog.Warningf("failed to parse jwt token: %s", err) + } + return nil, false + } + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username 
string, revision uint64) (string, error) { + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. + tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + "exp": time.Now().Add(t.ttl).Unix(), + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + if t.lg != nil { + t.lg.Warn( + "failed to sign a JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.Error(err), + ) + } else { + plog.Debugf("failed to sign jwt token: %s", err) + } + return "", err + } + + if t.lg != nil { + t.lg.Info( + "created/assigned a new JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.String("token", token), + ) + } else { + plog.Debugf("jwt token: %s", token) + } + return token, err +} + +func prepareOpts(lg *zap.Logger, opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, ttl time.Duration, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + case "ttl": + ttl, err = time.ParseDuration(v) + if err != nil { + if lg != nil { + lg.Warn( + "failed to parse JWT TTL option", + zap.String("ttl-value", v), + zap.Error(err), + ) + } else { + plog.Errorf("failed to parse ttl option (%s)", err) + } + return "", "", "", 0, ErrInvalidAuthOpts + } + default: + if lg != nil { + lg.Warn("unknown JWT token option", zap.String("option", k)) + } else { + plog.Errorf("unknown token specific option: %s", k) + } + return "", "", "", 0, ErrInvalidAuthOpts + } + } + if len(jwtSignMethod) == 0 { + return "", "", "", 0, ErrInvalidAuthOpts + } + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, ttl, nil +} + +func newTokenProviderJWT(lg *zap.Logger, opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, 
jwtPrivKeyPath, ttl, err := prepareOpts(lg, opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + if ttl == 0 { + ttl = 5 * time.Minute + } + + t := &tokenJWT{ + lg: lg, + ttl: ttl, + } + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + if lg != nil { + lg.Warn( + "failed to read JWT public key", + zap.String("public-key-path", jwtPubKeyPath), + zap.Error(err), + ) + } else { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + } + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + if lg != nil { + lg.Warn( + "failed to parse JWT public key", + zap.String("public-key-path", jwtPubKeyPath), + zap.Error(err), + ) + } else { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + } + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + if lg != nil { + lg.Warn( + "failed to read JWT private key", + zap.String("private-key-path", jwtPrivKeyPath), + zap.Error(err), + ) + } else { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + } + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + if lg != nil { + lg.Warn( + "failed to parse JWT private key", + zap.String("private-key-path", jwtPrivKeyPath), + zap.Error(err), + ) + } else { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + } + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/nop.go b/vendor/github.com/coreos/etcd/auth/nop.go new file mode 100644 index 00000000..d4378747 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/nop.go @@ -0,0 +1,35 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" +) + +type tokenNop struct{} + +func (t *tokenNop) enable() {} +func (t *tokenNop) disable() {} +func (t *tokenNop) invalidateUser(string) {} +func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil } +func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + return nil, false +} +func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) { + return "", ErrAuthFailed +} +func newTokenProviderNop() (*tokenNop, error) { + return &tokenNop{}, nil +} diff --git a/vendor/github.com/coreos/etcd/internal/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go similarity index 68% rename from vendor/github.com/coreos/etcd/internal/auth/range_perm_cache.go rename to vendor/github.com/coreos/etcd/auth/range_perm_cache.go index 24123935..6baa1867 100644 --- a/vendor/github.com/coreos/etcd/internal/auth/range_perm_cache.go +++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -15,15 +15,16 @@ package auth import ( - "github.com/coreos/etcd/internal/auth/authpb" - "github.com/coreos/etcd/internal/mvcc/backend" + "github.com/coreos/etcd/auth/authpb" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/pkg/adt" + + "go.uber.org/zap" ) -func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(tx, userName) +func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions { + user := getUser(lg, tx, userName) if user 
== nil { - plog.Errorf("invalid user name %s", userName) return nil } @@ -70,7 +71,11 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } } -func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { +func checkKeyInterval( + lg *zap.Logger, + cachedPerms *unifiedRangePermissions, + key, rangeEnd []byte, + permtyp authpb.Permission_Type) bool { if len(rangeEnd) == 1 && rangeEnd[0] == 0 { rangeEnd = nil } @@ -82,12 +87,16 @@ func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte case authpb.WRITE: return cachedPerms.writePerms.Contains(ivl) default: - plog.Panicf("unknown auth type: %v", permtyp) + if lg != nil { + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } else { + plog.Panicf("unknown auth type: %v", permtyp) + } } return false } -func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { +func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { pt := adt.NewBytesAffinePoint(key) switch permtyp { case authpb.READ: @@ -95,7 +104,11 @@ func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp aut case authpb.WRITE: return cachedPerms.writePerms.Intersects(pt) default: - plog.Panicf("unknown auth type: %v", permtyp) + if lg != nil { + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } else { + plog.Panicf("unknown auth type: %v", permtyp) + } } return false } @@ -104,19 +117,26 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key // assumption: tx is Lock()ed _, ok := as.rangePermCache[userName] if !ok { - perms := getMergedPerms(tx, userName) + perms := getMergedPerms(as.lg, tx, userName) if perms == nil { - plog.Errorf("failed to create a unified permission of user %s", userName) + if as.lg != nil { + as.lg.Warn( + "failed 
to create a merged permission", + zap.String("user-name", userName), + ) + } else { + plog.Errorf("failed to create a unified permission of user %s", userName) + } return false } as.rangePermCache[userName] = perms } if len(rangeEnd) == 0 { - return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp) } - return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) + return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { diff --git a/vendor/github.com/coreos/etcd/internal/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go similarity index 88% rename from vendor/github.com/coreos/etcd/internal/auth/simple_token.go rename to vendor/github.com/coreos/etcd/auth/simple_token.go index ac55ad7f..49797cbd 100644 --- a/vendor/github.com/coreos/etcd/internal/auth/simple_token.go +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -26,6 +26,8 @@ import ( "strings" "sync" "time" + + "go.uber.org/zap" ) const ( @@ -94,6 +96,7 @@ func (tm *simpleTokenTTLKeeper) run() { } type tokenSimple struct { + lg *zap.Logger indexWaiter func(uint64) <-chan struct{} simpleTokenKeeper *simpleTokenTTLKeeper simpleTokensMu sync.Mutex @@ -124,7 +127,15 @@ func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { _, ok := t.simpleTokens[token] if ok { - plog.Panicf("token %s is alredy used", token) + if t.lg != nil { + t.lg.Panic( + "failed to assign already-used simple token to a user", + zap.String("user-name", username), + zap.String("token", token), + ) + } else { + plog.Panicf("token %s is alredy used", token) + } } t.simpleTokens[token] = username @@ -137,7 +148,7 @@ func (t *tokenSimple) invalidateUser(username string) { } t.simpleTokensMu.Lock() for token, name := range t.simpleTokens { - if strings.Compare(name, username) == 0 { + if name == username { delete(t.simpleTokens, token) 
t.simpleTokenKeeper.deleteSimpleToken(token) } @@ -148,7 +159,15 @@ func (t *tokenSimple) invalidateUser(username string) { func (t *tokenSimple) enable() { delf := func(tk string) { if username, ok := t.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) + if t.lg != nil { + t.lg.Info( + "deleted a simple token", + zap.String("user-name", username), + zap.String("token", tk), + ) + } else { + plog.Infof("deleting token %s for user %s", tk, username) + } delete(t.simpleTokens, tk) } } @@ -215,8 +234,9 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool return false } -func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { +func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}) *tokenSimple { return &tokenSimple{ + lg: lg, simpleTokens: make(map[string]string), indexWaiter: indexWaiter, } diff --git a/vendor/github.com/coreos/etcd/internal/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go similarity index 67% rename from vendor/github.com/coreos/etcd/internal/auth/store.go rename to vendor/github.com/coreos/etcd/auth/store.go index 7796c5d8..3f305a1a 100644 --- a/vendor/github.com/coreos/etcd/internal/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -24,11 +24,13 @@ import ( "sync" "sync/atomic" + "github.com/coreos/etcd/auth/authpb" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth/authpb" - "github.com/coreos/etcd/internal/mvcc/backend" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" "golang.org/x/crypto/bcrypt" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" @@ -64,9 +66,6 @@ var ( ErrInvalidAuthToken = errors.New("auth: invalid auth token") ErrInvalidAuthOpts = errors.New("auth: invalid auth options") ErrInvalidAuthMgmt = errors.New("auth: invalid 
auth management") - - // BcryptCost is the algorithm cost / strength for hashing auth passwords - BcryptCost = bcrypt.DefaultCost ) const ( @@ -87,6 +86,7 @@ type AuthenticateParamIndex struct{} // AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate() type AuthenticateParamSimpleTokenPrefix struct{} +// AuthStore defines auth storage interface. type AuthStore interface { // AuthEnable turns on the authentication feature AuthEnable() error @@ -94,6 +94,9 @@ type AuthStore interface { // AuthDisable turns off the authentication feature AuthDisable() + // IsAuthEnabled returns true if the authentication feature is enabled. + IsAuthEnabled() bool + // Authenticate does authentication based on given user name and password Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) @@ -191,6 +194,7 @@ type authStore struct { // atomic operations; need 64-bit align, or 32-bit tests will crash revision uint64 + lg *zap.Logger be backend.Backend enabled bool enabledMu sync.RWMutex @@ -198,13 +202,18 @@ type authStore struct { rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions tokenProvider TokenProvider + bcryptCost int // the algorithm cost / strength for hashing auth passwords } func (as *authStore) AuthEnable() error { as.enabledMu.Lock() defer as.enabledMu.Unlock() if as.enabled { - plog.Noticef("Authentication already enabled") + if as.lg != nil { + as.lg.Info("authentication is already enabled; ignored auth enable request") + } else { + plog.Noticef("Authentication already enabled") + } return nil } b := as.be @@ -215,7 +224,7 @@ func (as *authStore) AuthEnable() error { b.ForceCommit() }() - u := getUser(tx, rootUser) + u := getUser(as.lg, tx, rootUser) if u == nil { return ErrRootUserNotExist } @@ -233,8 +242,11 @@ func (as *authStore) AuthEnable() error { as.setRevision(getRevision(tx)) - plog.Noticef("Authentication enabled") - + if as.lg != nil 
{ + as.lg.Info("enabled authentication") + } else { + plog.Noticef("Authentication enabled") + } return nil } @@ -255,7 +267,11 @@ func (as *authStore) AuthDisable() { as.enabled = false as.tokenProvider.disable() - plog.Noticef("Authentication disabled") + if as.lg != nil { + as.lg.Info("disabled authentication") + } else { + plog.Noticef("Authentication disabled") + } } func (as *authStore) Close() error { @@ -269,7 +285,7 @@ func (as *authStore) Close() error { } func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil, ErrAuthNotEnabled } @@ -277,7 +293,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string tx.Lock() defer tx.Unlock() - user := getUser(tx, username) + user := getUser(as.lg, tx, username) if user == nil { return nil, ErrAuthFailed } @@ -290,12 +306,20 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, err } - plog.Debugf("authorized %s, token is %s", username, token) + if as.lg != nil { + as.lg.Debug( + "authenticated a user", + zap.String("user-name", username), + zap.String("token", token), + ) + } else { + plog.Debugf("authorized %s, token is %s", username, token) + } return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return 0, ErrAuthNotEnabled } @@ -303,16 +327,19 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { tx.Lock() defer tx.Unlock() - user := getUser(tx, username) + user := getUser(as.lg, tx, username) if user == nil { return 0, ErrAuthFailed } if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - plog.Noticef("authentication failed, invalid password for user %s", username) + if as.lg != nil { + as.lg.Info("invalid password", 
zap.String("user-name", username)) + } else { + plog.Noticef("authentication failed, invalid password for user %s", username) + } return 0, ErrAuthFailed } - return getRevision(tx), nil } @@ -342,9 +369,17 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, return nil, ErrUserEmpty } - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost) if err != nil { - plog.Errorf("failed to hash password: %s", err) + if as.lg != nil { + as.lg.Warn( + "failed to bcrypt hash password", + zap.String("user-name", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("failed to hash password: %s", err) + } return nil, err } @@ -352,7 +387,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user != nil { return nil, ErrUserAlreadyExist } @@ -362,18 +397,25 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, Password: hashed, } - putUser(tx, newUser) + putUser(as.lg, tx, newUser) as.commitRevision(tx) - plog.Noticef("added a new user: %s", r.Name) - + if as.lg != nil { + as.lg.Info("added a user", zap.String("user-name", r.Name)) + } else { + plog.Noticef("added a new user: %s", r.Name) + } return &pb.AuthUserAddResponse{}, nil } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 { - plog.Errorf("the user root must not be deleted") + if as.enabled && r.Name == rootUser { + if as.lg != nil { + as.lg.Warn("cannot delete 'root' user", zap.String("user-name", r.Name)) + } else { + plog.Errorf("the user root must not be deleted") + } return nil, ErrInvalidAuthMgmt } @@ -381,7 +423,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete tx.Lock() defer 
tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -393,17 +435,32 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete as.invalidateCachedPerm(r.Name) as.tokenProvider.invalidateUser(r.Name) - plog.Noticef("deleted a user: %s", r.Name) - + if as.lg != nil { + as.lg.Info( + "deleted a user", + zap.String("user-name", r.Name), + zap.Strings("user-roles", user.Roles), + ) + } else { + plog.Noticef("deleted a user: %s", r.Name) + } return &pb.AuthUserDeleteResponse{}, nil } func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() // If the cost is too high, we should move the encryption to outside of the raft - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost) if err != nil { - plog.Errorf("failed to hash password: %s", err) + if as.lg != nil { + as.lg.Warn( + "failed to bcrypt hash password", + zap.String("user-name", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("failed to hash password: %s", err) + } return nil, err } @@ -411,7 +468,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -422,15 +479,22 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p Password: hashed, } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.commitRevision(tx) as.invalidateCachedPerm(r.Name) as.tokenProvider.invalidateUser(r.Name) - plog.Noticef("changed a password of a user: %s", r.Name) - + if as.lg != nil { + as.lg.Info( + "changed a password of a user", + zap.String("user-name", r.Name), + 
zap.Strings("user-roles", user.Roles), + ) + } else { + plog.Noticef("changed a password of a user: %s", r.Name) + } return &pb.AuthUserChangePasswordResponse{}, nil } @@ -439,7 +503,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser tx.Lock() defer tx.Unlock() - user := getUser(tx, r.User) + user := getUser(as.lg, tx, r.User) if user == nil { return nil, ErrUserNotFound } @@ -452,28 +516,46 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser } idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { - plog.Warningf("user %s is already granted role %s", r.User, r.Role) + if idx < len(user.Roles) && user.Roles[idx] == r.Role { + if as.lg != nil { + as.lg.Warn( + "ignored grant role request to a user", + zap.String("user-name", r.User), + zap.Strings("user-roles", user.Roles), + zap.String("duplicate-role-name", r.Role), + ) + } else { + plog.Warningf("user %s is already granted role %s", r.User, r.Role) + } return &pb.AuthUserGrantRoleResponse{}, nil } user.Roles = append(user.Roles, r.Role) sort.Strings(user.Roles) - putUser(tx, user) + putUser(as.lg, tx, user) as.invalidateCachedPerm(r.User) as.commitRevision(tx) - plog.Noticef("granted role %s to user %s", r.Role, r.User) + if as.lg != nil { + as.lg.Info( + "granted a role to a user", + zap.String("user-name", r.User), + zap.Strings("user-roles", user.Roles), + zap.String("added-role-name", r.Role), + ) + } else { + plog.Noticef("granted role %s to user %s", r.Role, r.User) + } return &pb.AuthUserGrantRoleResponse{}, nil } func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { tx := as.be.BatchTx() tx.Lock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) tx.Unlock() if user == nil { @@ -488,7 +570,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, func (as *authStore) UserList(r 
*pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { tx := as.be.BatchTx() tx.Lock() - users := getAllUsers(tx) + users := getAllUsers(as.lg, tx) tx.Unlock() resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} @@ -499,8 +581,16 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be revoked from the user root") + if as.enabled && r.Name == rootUser && r.Role == rootRole { + if as.lg != nil { + as.lg.Warn( + "'root' user cannot revoke 'root' role", + zap.String("user-name", r.Name), + zap.String("role-name", r.Role), + ) + } else { + plog.Errorf("the role root must not be revoked from the user root") + } return nil, ErrInvalidAuthMgmt } @@ -508,7 +598,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -519,7 +609,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs } for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { + if role != r.Role { updatedUser.Roles = append(updatedUser.Roles, role) } } @@ -528,13 +618,23 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs return nil, ErrRoleNotGranted } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.invalidateCachedPerm(r.Name) as.commitRevision(tx) - plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + if as.lg != nil { + as.lg.Info( + "revoked a role from a user", + zap.String("user-name", r.Name), + zap.Strings("old-user-roles", user.Roles), + zap.Strings("new-user-roles", updatedUser.Roles), + zap.String("revoked-role-name", 
r.Role), + ) + } else { + plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + } return &pb.AuthUserRevokeRoleResponse{}, nil } @@ -556,7 +656,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { tx := as.be.BatchTx() tx.Lock() - roles := getAllRoles(tx) + roles := getAllRoles(as.lg, tx) tx.Unlock() resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} @@ -581,7 +681,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { + if !bytes.Equal(perm.Key, r.Key) || !bytes.Equal(perm.RangeEnd, r.RangeEnd) { updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) } } @@ -590,7 +690,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) return nil, ErrPermissionNotGranted } - putRole(tx, updatedRole) + putRole(as.lg, tx, updatedRole) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. 
@@ -598,13 +698,26 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) as.commitRevision(tx) - plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + if as.lg != nil { + as.lg.Info( + "revoked a permission on range", + zap.String("role-name", r.Role), + zap.String("key", string(r.Key)), + zap.String("range-end", string(r.RangeEnd)), + ) + } else { + plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + } return &pb.AuthRoleRevokePermissionResponse{}, nil } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be deleted") + if as.enabled && r.Role == rootRole { + if as.lg != nil { + as.lg.Warn("cannot delete 'root' role", zap.String("role-name", r.Role)) + } else { + plog.Errorf("the role root must not be deleted") + } return nil, ErrInvalidAuthMgmt } @@ -619,7 +732,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) - users := getAllUsers(tx) + users := getAllUsers(as.lg, tx) for _, user := range users { updatedUser := &authpb.User{ Name: user.Name, @@ -627,7 +740,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete } for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { + if role != r.Role { updatedUser.Roles = append(updatedUser.Roles, role) } } @@ -636,14 +749,18 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete continue } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.invalidateCachedPerm(string(user.Name)) } as.commitRevision(tx) - plog.Noticef("deleted role %s", r.Role) + if as.lg != nil { + as.lg.Info("deleted a role", zap.String("role-name", r.Role)) + } else { + plog.Noticef("deleted role %s", r.Role) + } return &pb.AuthRoleDeleteResponse{}, nil } @@ -661,12 +778,15 @@ func (as *authStore) RoleAdd(r 
*pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, Name: []byte(r.Name), } - putRole(tx, newRole) + putRole(as.lg, tx, newRole) as.commitRevision(tx) - plog.Noticef("Role %s is created", r.Name) - + if as.lg != nil { + as.lg.Info("created a role", zap.String("role-name", r.Name)) + } else { + plog.Noticef("Role %s is created", r.Name) + } return &pb.AuthRoleAddResponse{}, nil } @@ -699,7 +819,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( } idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0 + return bytes.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0 }) if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { @@ -708,8 +828,8 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( } else { // append new permission to the role newPerm := &authpb.Permission{ - Key: []byte(r.Perm.Key), - RangeEnd: []byte(r.Perm.RangeEnd), + Key: r.Perm.Key, + RangeEnd: r.Perm.RangeEnd, PermType: r.Perm.PermType, } @@ -717,7 +837,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( sort.Sort(permSlice(role.KeyPermission)) } - putRole(tx, role) + putRole(as.lg, tx, role) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. 
@@ -725,14 +845,21 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( as.commitRevision(tx) - plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) - + if as.lg != nil { + as.lg.Info( + "granted/updated a permission to a user", + zap.String("user-name", r.Name), + zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]), + ) + } else { + plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) + } return &pb.AuthRoleGrantPermissionResponse{}, nil } func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil } @@ -749,9 +876,13 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE tx.Lock() defer tx.Unlock() - user := getUser(tx, userName) + user := getUser(as.lg, tx, userName) if user == nil { - plog.Errorf("invalid user name %s for permission checking", userName) + if as.lg != nil { + as.lg.Warn("cannot find a user for permission check", zap.String("user-name", userName)) + } else { + plog.Errorf("invalid user name %s for permission checking", userName) + } return ErrPermissionDenied } @@ -780,7 +911,7 @@ func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd [] } func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil } if authInfo == nil { @@ -789,7 +920,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { tx := as.be.BatchTx() tx.Lock() - u := getUser(tx, authInfo.Username) + u := getUser(as.lg, tx, authInfo.Username) tx.Unlock() if u == nil { @@ -803,7 +934,7 @@ func (as *authStore) 
IsAdminPermitted(authInfo *AuthInfo) error { return nil } -func getUser(tx backend.BatchTx, username string) *authpb.User { +func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User { _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) if len(vs) == 0 { return nil @@ -812,12 +943,20 @@ func getUser(tx backend.BatchTx, username string) *authpb.User { user := &authpb.User{} err := user.Unmarshal(vs[0]) if err != nil { - plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + if lg != nil { + lg.Panic( + "failed to unmarshal 'authpb.User'", + zap.String("user-name", username), + zap.Error(err), + ) + } else { + plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + } } return user } -func getAllUsers(tx backend.BatchTx) []*authpb.User { +func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User { _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) if len(vs) == 0 { return nil @@ -828,17 +967,25 @@ func getAllUsers(tx backend.BatchTx) []*authpb.User { user := &authpb.User{} err := user.Unmarshal(vs[i]) if err != nil { - plog.Panicf("failed to unmarshal user struct: %s", err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) + } else { + plog.Panicf("failed to unmarshal user struct: %s", err) + } } users[i] = user } return users } -func putUser(tx backend.BatchTx, user *authpb.User) { +func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) { b, err := user.Marshal() if err != nil { - plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) + } else { + plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + } } tx.UnsafePut(authUsersBucketName, user.Name, b) } @@ -861,7 +1008,7 @@ func getRole(tx backend.BatchTx, rolename string) *authpb.Role { return role } -func getAllRoles(tx 
backend.BatchTx) []*authpb.Role { +func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role { _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) if len(vs) == 0 { return nil @@ -872,33 +1019,62 @@ func getAllRoles(tx backend.BatchTx) []*authpb.Role { role := &authpb.Role{} err := role.Unmarshal(vs[i]) if err != nil { - plog.Panicf("failed to unmarshal role struct: %s", err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } else { + plog.Panicf("failed to unmarshal role struct: %s", err) + } } roles[i] = role } return roles } -func putRole(tx backend.BatchTx, role *authpb.Role) { +func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) { b, err := role.Marshal() if err != nil { - plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + if lg != nil { + lg.Panic( + "failed to marshal 'authpb.Role'", + zap.String("role-name", string(role.Name)), + zap.Error(err), + ) + } else { + plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + } } - tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) + tx.UnsafePut(authRolesBucketName, role.Name, b) } func delRole(tx backend.BatchTx, rolename string) { tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) } -func (as *authStore) isAuthEnabled() bool { +func (as *authStore) IsAuthEnabled() bool { as.enabledMu.RLock() defer as.enabledMu.RUnlock() return as.enabled } -func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { +// NewAuthStore creates a new AuthStore. 
+func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore { + if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost { + if lg != nil { + lg.Warn( + "use default bcrypt cost instead of the invalid given cost", + zap.Int("min-cost", bcrypt.MinCost), + zap.Int("max-cost", bcrypt.MaxCost), + zap.Int("default-cost", bcrypt.DefaultCost), + zap.Int("given-cost", bcryptCost)) + } else { + plog.Warningf("Use default bcrypt-cost %d instead of the invalid value %d", + bcrypt.DefaultCost, bcryptCost) + } + + bcryptCost = bcrypt.DefaultCost + } + tx := be.BatchTx() tx.Lock() @@ -915,11 +1091,13 @@ func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { } as := &authStore{ - be: be, revision: getRevision(tx), + lg: lg, + be: be, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), tokenProvider: tp, + bcryptCost: bcryptCost, } if enabled { @@ -950,12 +1128,11 @@ func (as *authStore) commitRevision(tx backend.BatchTx) { } func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) + _, vs := tx.UnsafeRange(authBucketName, revisionKey, nil, 0) if len(vs) != 1 { // this can happen in the initialization phase return 0 } - return binary.BigEndian.Uint64(vs[0]) } @@ -967,7 +1144,7 @@ func (as *authStore) Revision() uint64 { return atomic.LoadUint64(&as.revision) } -func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { +func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) { peer, ok := peer.FromContext(ctx) if !ok || peer == nil || peer.AuthInfo == nil { return nil @@ -975,18 +1152,26 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { tlsInfo := peer.AuthInfo.(credentials.TLSInfo) for _, chains := range tlsInfo.State.VerifiedChains { - for _, chain := range chains { - cn := chain.Subject.CommonName - plog.Debugf("found common name %s", cn) - - return &AuthInfo{ - Username: cn, - 
Revision: as.Revision(), - } + if len(chains) < 1 { + continue } + ai = &AuthInfo{ + Username: chains[0].Subject.CommonName, + Revision: as.Revision(), + } + if as.lg != nil { + as.lg.Debug( + "found command name", + zap.String("common-name", ai.Username), + zap.String("user-name", ai.Username), + zap.Uint64("revision", ai.Revision), + ) + } else { + plog.Debugf("found common name %s", ai.Username) + } + break } - - return nil + return ai } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { @@ -996,9 +1181,9 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } //TODO(mitake|hexfusion) review unifying key names - ts, ok := md["token"] + ts, ok := md[rpctypes.TokenFieldNameGRPC] if !ok { - ts, ok = md["authorization"] + ts, ok = md[rpctypes.TokenFieldNameSwagger] } if !ok { return nil, nil @@ -1007,7 +1192,11 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { token := ts[0] authInfo, uok := as.authInfoFromToken(ctx, token) if !uok { - plog.Warningf("invalid auth token: %s", token) + if as.lg != nil { + as.lg.Warn("invalid auth token", zap.String("token", token)) + } else { + plog.Warningf("invalid auth token: %s", token) + } return nil, ErrInvalidAuthToken } @@ -1018,7 +1207,7 @@ func (as *authStore) GenTokenPrefix() (string, error) { return as.tokenProvider.genTokenPrefix() } -func decomposeOpts(optstr string) (string, map[string]string, error) { +func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) { opts := strings.Split(optstr, ",") tokenType := opts[0] @@ -1027,12 +1216,24 @@ func decomposeOpts(optstr string) (string, map[string]string, error) { pair := strings.Split(opts[i], "=") if len(pair) != 2 { - plog.Errorf("invalid token specific option: %s", optstr) + if lg != nil { + lg.Warn("invalid token option", zap.String("option", optstr)) + } else { + plog.Errorf("invalid token specific option: %s", optstr) + } return "", nil, 
ErrInvalidAuthOpts } if _, ok := typeSpecificOpts[pair[0]]; ok { - plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + if lg != nil { + lg.Warn( + "invalid token option", + zap.String("option", optstr), + zap.String("duplicate-parameter", pair[0]), + ) + } else { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + } return "", nil, ErrInvalidAuthOpts } @@ -1043,26 +1244,47 @@ func decomposeOpts(optstr string) (string, map[string]string, error) { } -func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) +// NewTokenProvider creates a new token provider. +func NewTokenProvider( + lg *zap.Logger, + tokenOpts string, + indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts) if err != nil { return nil, ErrInvalidAuthOpts } switch tokenType { case "simple": - plog.Warningf("simple token is not cryptographically signed") - return newTokenProviderSimple(indexWaiter), nil + if lg != nil { + lg.Warn("simple token is not cryptographically signed") + } else { + plog.Warningf("simple token is not cryptographically signed") + } + return newTokenProviderSimple(lg, indexWaiter), nil + case "jwt": - return newTokenProviderJWT(typeSpecificOpts) + return newTokenProviderJWT(lg, typeSpecificOpts) + + case "": + return newTokenProviderNop() + default: - plog.Errorf("unknown token type: %s", tokenType) + if lg != nil { + lg.Warn( + "unknown token type", + zap.String("type", tokenType), + zap.Error(ErrInvalidAuthOpts), + ) + } else { + plog.Errorf("unknown token type: %s", tokenType) + } return nil, ErrInvalidAuthOpts } } func (as *authStore) WithRoot(ctx context.Context) context.Context { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return ctx } @@ -1071,7 +1293,14 @@ func (as *authStore) 
WithRoot(ctx context.Context) context.Context { ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0)) prefix, err := ts.genTokenPrefix() if err != nil { - plog.Errorf("failed to generate prefix of internally used token") + if as.lg != nil { + as.lg.Warn( + "failed to generate prefix of internally used token", + zap.Error(err), + ) + } else { + plog.Errorf("failed to generate prefix of internally used token") + } return ctx } ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix) @@ -1082,12 +1311,19 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision()) if err != nil { // this must not happen - plog.Errorf("failed to assign token for lease revoking: %s", err) + if as.lg != nil { + as.lg.Warn( + "failed to assign token for lease revoking", + zap.Error(err), + ) + } else { + plog.Errorf("failed to assign token for lease revoking: %s", err) + } return ctx } mdMap := map[string]string{ - "token": token, + rpctypes.TokenFieldNameGRPC: token, } tokenMD := metadata.New(mdMap) @@ -1098,11 +1334,19 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { func (as *authStore) HasRole(user, role string) bool { tx := as.be.BatchTx() tx.Lock() - u := getUser(tx, user) + u := getUser(as.lg, tx, user) tx.Unlock() if u == nil { - plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user) + if as.lg != nil { + as.lg.Warn( + "'has-role' requested for non-existing user", + zap.String("user-name", user), + zap.String("role-name", role), + ) + } else { + plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user) + } return false } @@ -1111,6 +1355,9 @@ func (as *authStore) HasRole(user, role string) bool { return true } } - return false } + +func (as *authStore) BcryptCost() int { + return as.bcryptCost +} diff --git 
a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index ec732723..e6874505 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -29,7 +29,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/internal/version" + "github.com/coreos/etcd/version" ) var ( diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go index 237fdbe8..1c65c1b0 100644 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -1,48 +1,40 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ +// Code generated by codecgen - DO NOT EDIT. package client import ( "errors" - "fmt" - "reflect" "runtime" - time "time" + "strconv" + "time" codec1978 "github.com/ugorji/go/codec" ) const ( // ----- content types ---- - codecSelferC_UTF87612 = 1 - codecSelferC_RAW7612 = 0 + codecSelferCcUTF86628 = 1 + codecSelferCcRAW6628 = 0 // ----- value types used ---- - codecSelferValueTypeArray7612 = 10 - codecSelferValueTypeMap7612 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey7612 = 2 - codecSelfer_containerMapValue7612 = 3 - codecSelfer_containerMapEnd7612 = 4 - codecSelfer_containerArrayElem7612 = 6 - codecSelfer_containerArrayEnd7612 = 7 + codecSelferValueTypeArray6628 = 10 + codecSelferValueTypeMap6628 = 9 + codecSelferValueTypeString6628 = 6 + codecSelferValueTypeInt6628 = 2 + codecSelferValueTypeUint6628 = 3 + codecSelferValueTypeFloat6628 = 4 + codecSelferBitsize6628 = uint8(32 << (^uint(0) >> 63)) ) var ( - codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) + 
errCodecSelferOnlyMapOrArrayEncodeToStruct6628 = errors.New(`only encoded map or array can be decoded into a struct`) ) -type codecSelfer7612 struct{} +type codecSelfer6628 struct{} func init() { if codec1978.GenVersion != 8 { _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 8, codec1978.GenVersion, file) - panic(err) + panic("codecgen version mismatch: current: 8, need " + strconv.FormatInt(int64(codec1978.GenVersion), 10) + ". Re-generate file: " + file) } if false { // reference the types, but skip this branch at build/run time var v0 time.Duration @@ -51,21 +43,20 @@ func init() { } func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(4) } else { @@ -73,18 +64,14 @@ func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { r.EncodeInt(int64(x.Code)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("errorCode")) + r.EncodeString(codecSelferCcUTF86628, `errorCode`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { r.EncodeInt(int64(x.Code)) @@ -92,56 +79,44 @@ func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + 
r.EncodeString(codecSelferCcUTF86628, string(x.Message)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("message")) + r.EncodeString(codecSelferCcUTF86628, `message`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + r.EncodeString(codecSelferCcUTF86628, string(x.Message)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + r.EncodeString(codecSelferCcUTF86628, string(x.Cause)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("cause")) + r.EncodeString(codecSelferCcUTF86628, `cause`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + r.EncodeString(codecSelferCcUTF86628, string(x.Cause)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { } else { r.EncodeUint(uint64(x.Index)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("index")) + r.EncodeString(codecSelferCcUTF86628, `index`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { } else { r.EncodeUint(uint64(x.Index)) @@ -157,23 +132,22 @@ func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == 
codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -181,17 +155,15 @@ func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -204,57 +176,32 @@ func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "errorCode": if r.TryDecodeAsNil() { x.Code = 0 } else { - yyv4 := &x.Code - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) - } + x.Code = (int)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize6628)) } case "message": if r.TryDecodeAsNil() { x.Message = "" } else { - yyv6 := &x.Message - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Message = (string)(r.DecodeString()) } case "cause": if r.TryDecodeAsNil() { x.Cause = "" } else { - yyv8 := &x.Cause - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } + x.Cause = (string)(r.DecodeString()) } case "index": if r.TryDecodeAsNil() { x.Index = 0 } else { - yyv10 := &x.Index - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) - } + x.Index = (uint64)(r.DecodeUint64()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -264,295 +211,7 @@ func 
(x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Code = 0 - } else { - yyv13 := &x.Code - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Message = "" - } else { - yyv15 := &x.Message - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Cause = "" - } else { - yyv17 := &x.Cause - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Index = 0 - } else { - yyv19 := &x.Index - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") - } - r.ReadArrayEnd() -} - -func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := 
z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x)) - } -} - -func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(2) - } else { - r.WriteMapStart(2) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeUint(uint64(x.AfterIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeUint(uint64(x.AfterIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if 
z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "AfterIndex": - if r.TryDecodeAsNil() { - x.AfterIndex = 0 - } else { - yyv4 := &x.AfterIndex - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv6 := &x.Recursive - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj8 int @@ -570,15 +229,9 @@ func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } r.ReadArrayElem() if r.TryDecodeAsNil() { - x.AfterIndex = 0 + x.Code = 0 } else { - yyv9 := &x.AfterIndex - yym10 := z.DecBinary() - _ = yym10 - if false { - } else 
{ - *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) - } + x.Code = (int)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize6628)) } yyj8++ if yyhl8 { @@ -592,15 +245,41 @@ func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } r.ReadArrayElem() if r.TryDecodeAsNil() { - x.Recursive = false + x.Message = "" } else { - yyv11 := &x.Recursive - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } + x.Message = (string)(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + x.Cause = (string)(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + x.Index = (uint64)(r.DecodeUint64()) } for { yyj8++ @@ -618,22 +297,226 @@ func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { r.ReadArrayEnd() } -func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 +func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) + } else { + r.EncodeString(codecSelferCcUTF86628, string(x)) + } +} + +func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) + } else { + *x = (PrevExistType)(r.DecodeString()) + } +} + +func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == 
nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' + if yyr2 || yy2arr2 { + r.WriteArrayStart(2) + } else { + r.WriteMapStart(2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `AfterIndex`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `Recursive`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap6628 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray6628 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) + } + } +} + +func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + 
var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3 := z.StringView(r.DecodeStringAsBytes()) + r.ReadMapElemValue() + switch yys3 { + case "AfterIndex": + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + x.AfterIndex = (uint64)(r.DecodeUint64()) + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + x.AfterIndex = (uint64)(r.DecodeUint64()) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj6-1, "") + } + r.ReadArrayEnd() +} + +func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || 
yy2arr2 { r.WriteArrayStart(1) } else { @@ -641,21 +524,19 @@ func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt4 := z.Extension(z.I2Rtid(x.TTL)); yyxt4 != nil { + z.EncExtension(x.TTL, yyxt4) } else { r.EncodeInt(int64(x.TTL)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.EncodeString(codecSelferCcUTF86628, `TTL`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt5 := z.Extension(z.I2Rtid(x.TTL)); yyxt5 != nil { + z.EncExtension(x.TTL, yyxt5) } else { r.EncodeInt(int64(x.TTL)) } @@ -670,23 +551,22 @@ func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -694,17 +574,15 @@ func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + 
var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -717,21 +595,18 @@ func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "TTL": if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv4 := &x.TTL - yym5 := z.DecBinary() - _ = yym5 if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yyxt5 := z.Extension(z.I2Rtid(x.TTL)); yyxt5 != nil { + z.DecExtension(x.TTL, yyxt5) } else { - *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } default: @@ -742,7 +617,7 @@ func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decode } func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj6 int @@ -762,13 +637,11 @@ func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv7 := &x.TTL - yym8 := z.DecBinary() - _ = yym8 if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if yyxt8 := z.Extension(z.I2Rtid(x.TTL)); yyxt8 != nil { + z.DecExtension(x.TTL, yyxt8) } else { - *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } for { @@ -788,21 +661,20 @@ func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Deco } func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else 
if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(7) } else { @@ -810,37 +682,29 @@ func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.EncodeString(codecSelferCcUTF86628, `PrevValue`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.EncodeString(codecSelferCcUTF86628, `PrevIndex`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) @@ -851,45 +715,39 @@ func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { x.PrevExist.CodecEncodeSelf(e) } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.EncodeString(codecSelferCcUTF86628, `PrevExist`) r.WriteMapElemValue() x.PrevExist.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt13 := z.Extension(z.I2Rtid(x.TTL)); yyxt13 != nil { + z.EncExtension(x.TTL, yyxt13) } else { r.EncodeInt(int64(x.TTL)) } } else { 
r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.EncodeString(codecSelferCcUTF86628, `TTL`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt14 := z.Extension(z.I2Rtid(x.TTL)); yyxt14 != nil { + z.EncExtension(x.TTL, yyxt14) } else { r.EncodeInt(int64(x.TTL)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 if false { } else { r.EncodeBool(bool(x.Refresh)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.EncodeString(codecSelferCcUTF86628, `Refresh`) r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 if false { } else { r.EncodeBool(bool(x.Refresh)) @@ -897,18 +755,14 @@ func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 if false { } else { r.EncodeBool(bool(x.Dir)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.EncodeString(codecSelferCcUTF86628, `Dir`) r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 if false { } else { r.EncodeBool(bool(x.Dir)) @@ -916,18 +770,14 @@ func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym22 := z.EncBinary() - _ = yym22 if false { } else { r.EncodeBool(bool(x.NoValueOnSuccess)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.EncodeString(codecSelferCcUTF86628, `NoValueOnSuccess`) r.WriteMapElemValue() - yym23 := z.EncBinary() - _ = yym23 if false { } else { r.EncodeBool(bool(x.NoValueOnSuccess)) @@ -943,23 +793,22 @@ func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { 
- } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -967,17 +816,15 @@ func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -990,89 +837,55 @@ func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "PrevValue": if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv4 := &x.PrevValue - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } case "PrevIndex": if r.TryDecodeAsNil() { x.PrevIndex = 0 } else { - yyv6 := &x.PrevIndex - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) - } + x.PrevIndex = (uint64)(r.DecodeUint64()) } case "PrevExist": if r.TryDecodeAsNil() { x.PrevExist = "" } else { - yyv8 := &x.PrevExist - yyv8.CodecDecodeSelf(d) + x.PrevExist.CodecDecodeSelf(d) } 
case "TTL": if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv9 := &x.TTL - yym10 := z.DecBinary() - _ = yym10 if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yyxt8 := z.Extension(z.I2Rtid(x.TTL)); yyxt8 != nil { + z.DecExtension(x.TTL, yyxt8) } else { - *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } case "Refresh": if r.TryDecodeAsNil() { x.Refresh = false } else { - yyv11 := &x.Refresh - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } + x.Refresh = (bool)(r.DecodeBool()) } case "Dir": if r.TryDecodeAsNil() { x.Dir = false } else { - yyv13 := &x.Dir - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*bool)(yyv13)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } case "NoValueOnSuccess": if r.TryDecodeAsNil() { x.NoValueOnSuccess = false } else { - yyv15 := &x.NoValueOnSuccess - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } + x.NoValueOnSuccess = (bool)(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -1082,664 +895,7 @@ func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv18 := &x.PrevValue - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*string)(yyv18)) = r.DecodeString() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv20 := 
&x.PrevIndex - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.PrevExist = "" - } else { - yyv22 := &x.PrevExist - yyv22.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv23 := &x.TTL - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else { - *((*int64)(yyv23)) = int64(r.DecodeInt(64)) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Refresh = false - } else { - yyv25 := &x.Refresh - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv27 := &x.Dir - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.NoValueOnSuccess = false - } else { - yyv29 := &x.NoValueOnSuccess - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*bool)(yyv29)) = r.DecodeBool() - } - } - for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj17-1, "") - } - r.ReadArrayEnd() -} - -func (x 
*GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(3) - } else { - r.WriteMapStart(3) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Sort)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Sort")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Sort)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Quorum")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Quorum)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if 
yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv4 := &x.Recursive - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*bool)(yyv4)) = r.DecodeBool() - } - } - case "Sort": - if r.TryDecodeAsNil() { - x.Sort = false - } else { - yyv6 := &x.Sort - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "Quorum": - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv8 := &x.Quorum - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv11 := &x.Recursive 
- yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(yyv11)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Sort = false - } else { - yyv13 := &x.Sort - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*bool)(yyv13)) = r.DecodeBool() - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - r.ReadArrayEnd() - return - } - r.ReadArrayElem() - if r.TryDecodeAsNil() { - x.Quorum = false - } else { - yyv15 := &x.Quorum - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(yyv15)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - r.ReadArrayElem() - z.DecStructFieldNotFound(yyj10-1, "") - } - r.ReadArrayEnd() -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - _, _ = yysep2, yy2arr2 - const yyr2 bool = false - if yyr2 || yy2arr2 { - r.WriteArrayStart(4) - } else { - r.WriteMapStart(4) - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) - r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
r.EncodeUint(uint64(x.PrevIndex)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) - r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeUint(uint64(x.PrevIndex)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) - r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Recursive)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) - r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - if yyr2 || yy2arr2 { - r.WriteArrayEnd() - } else { - r.WriteMapEnd() - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - r.ReadMapEnd() - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray7612 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - r.ReadArrayEnd() - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - 
var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) - r.ReadMapElemValue() - switch yys3 { - case "PrevValue": - if r.TryDecodeAsNil() { - x.PrevValue = "" - } else { - yyv4 := &x.PrevValue - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "PrevIndex": - if r.TryDecodeAsNil() { - x.PrevIndex = 0 - } else { - yyv6 := &x.PrevIndex - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) - } - } - case "Recursive": - if r.TryDecodeAsNil() { - x.Recursive = false - } else { - yyv8 := &x.Recursive - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } - } - case "Dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv10 := &x.Dir - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - r.ReadMapEnd() -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj12 int @@ -1759,13 +915,7 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv13 := &x.PrevValue - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } yyj12++ if yyhl12 { @@ -1781,12 +931,43 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevIndex = 0 } else { - yyv15 := &x.PrevIndex - yym16 := z.DecBinary() - _ = yym16 + x.PrevIndex = (uint64)(r.DecodeUint64()) + } 
+ yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + x.PrevExist.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { if false { + } else if yyxt17 := z.Extension(z.I2Rtid(x.TTL)); yyxt17 != nil { + z.DecExtension(x.TTL, yyxt17) } else { - *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } yyj12++ @@ -1801,15 +982,9 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } r.ReadArrayElem() if r.TryDecodeAsNil() { - x.Recursive = false + x.Refresh = false } else { - yyv17 := &x.Recursive - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(yyv17)) = r.DecodeBool() - } + x.Refresh = (bool)(r.DecodeBool()) } yyj12++ if yyhl12 { @@ -1825,13 +1000,23 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Dir = false } else { - yyv19 := &x.Dir - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + x.NoValueOnSuccess = (bool)(r.DecodeBool()) } for { yyj12++ @@ -1849,22 +1034,21 @@ func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { r.ReadArrayEnd() } -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 +func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { 
r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(3) } else { @@ -1872,21 +1056,490 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + r.EncodeBool(bool(x.Recursive)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("action")) + r.EncodeString(codecSelferCcUTF86628, `Recursive`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `Sort`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `Quorum`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap6628 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray6628 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) + } + } +} + +func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3 := z.StringView(r.DecodeStringAsBytes()) + r.ReadMapElemValue() + switch yys3 { + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + case "Sort": + if r.TryDecodeAsNil() { + x.Sort = false + } else { + x.Sort = (bool)(r.DecodeBool()) + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + x.Quorum = (bool)(r.DecodeBool()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sort = false + } else { + x.Sort = (bool)(r.DecodeBool()) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } 
+ if yyb7 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + x.Quorum = (bool)(r.DecodeBool()) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj7-1, "") + } + r.ReadArrayEnd() +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `PrevValue`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `PrevIndex`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `Recursive`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferCcUTF86628, `Dir`) + 
r.WriteMapElemValue() + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap6628 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray6628 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3 := z.StringView(r.DecodeStringAsBytes()) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + x.PrevValue = (string)(r.DecodeString()) + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + x.PrevIndex = (uint64)(r.DecodeUint64()) + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + x.Dir = (bool)(r.DecodeBool()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer6628 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + x.PrevValue = (string)(r.DecodeString()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + x.PrevIndex = (uint64)(r.DecodeUint64()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + x.Recursive = (bool)(r.DecodeBool()) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + x.Dir = (bool)(r.DecodeBool()) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer6628 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + if false { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if false { + } else { + r.EncodeString(codecSelferCcUTF86628, string(x.Action)) + } + } else { + r.WriteMapElemKey() + 
r.EncodeString(codecSelferCcUTF86628, `action`) + r.WriteMapElemValue() + if false { + } else { + r.EncodeString(codecSelferCcUTF86628, string(x.Action)) } } var yyn6 bool @@ -1909,7 +1562,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("node")) + r.EncodeString(codecSelferCcUTF86628, `node`) r.WriteMapElemValue() if yyn6 { r.EncodeNil() @@ -1941,7 +1594,7 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("prevNode")) + r.EncodeString(codecSelferCcUTF86628, `prevNode`) r.WriteMapElemValue() if yyn9 { r.EncodeNil() @@ -1963,23 +1616,22 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -1987,17 +1639,15 @@ func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default 
slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -2010,48 +1660,37 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "action": if r.TryDecodeAsNil() { x.Action = "" } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Action = (string)(r.DecodeString()) } case "node": - if x.Node == nil { - x.Node = new(Node) - } if r.TryDecodeAsNil() { - if x.Node != nil { + if true && x.Node != nil { x.Node = nil } } else { if x.Node == nil { x.Node = new(Node) } + x.Node.CodecDecodeSelf(d) } case "prevNode": - if x.PrevNode == nil { - x.PrevNode = new(Node) - } if r.TryDecodeAsNil() { - if x.PrevNode != nil { + if true && x.PrevNode != nil { x.PrevNode = nil } } else { if x.PrevNode == nil { x.PrevNode = new(Node) } + x.PrevNode.CodecDecodeSelf(d) } default: @@ -2062,19 +1701,19 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { r.ReadArrayEnd() return } @@ -2082,103 +1721,98 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Action = "" } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } + x.Action = (string)(r.DecodeString()) } - if x.Node == 
nil { - x.Node = new(Node) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { r.ReadArrayEnd() return } r.ReadArrayElem() if r.TryDecodeAsNil() { - if x.Node != nil { + if true && x.Node != nil { x.Node = nil } } else { if x.Node == nil { x.Node = new(Node) } + x.Node.CodecDecodeSelf(d) } - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { r.ReadArrayEnd() return } r.ReadArrayElem() if r.TryDecodeAsNil() { - if x.PrevNode != nil { + if true && x.PrevNode != nil { x.PrevNode = nil } } else { if x.PrevNode == nil { x.PrevNode = new(Node) } + x.PrevNode.CodecDecodeSelf(d) } for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb8 = r.CheckBreak() + yyb7 = r.CheckBreak() } - if yyb8 { + if yyb7 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj8-1, "") + z.DecStructFieldNotFound(yyj7-1, "") } r.ReadArrayEnd() } func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _ = yyq2 _, _ = yysep2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 + const yyr2 bool = false // struct tag has 'toArray' + var yyq2 = [8]bool{ // should field at this index be written? 
+ true, // Key + x.Dir, // Dir + true, // Value + true, // Nodes + true, // CreatedIndex + true, // ModifiedIndex + x.Expiration != nil, // Expiration + x.TTL != 0, // TTL + } + _ = yyq2 if yyr2 || yy2arr2 { r.WriteArrayStart(8) } else { - var yynn2 = 5 + var yynn2 int for _, b := range yyq2 { if b { yynn2++ @@ -2188,28 +1822,22 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("key")) + r.EncodeString(codecSelferCcUTF86628, `key`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 if false { } else { r.EncodeBool(bool(x.Dir)) @@ -2220,10 +1848,8 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[1] { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("dir")) + r.EncodeString(codecSelferCcUTF86628, `dir`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { r.EncodeBool(bool(x.Dir)) @@ -2232,21 +1858,17 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, string(x.Value)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("value")) + r.EncodeString(codecSelferCcUTF86628, `value`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, 
string(x.Value)) } } if yyr2 || yy2arr2 { @@ -2258,7 +1880,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("nodes")) + r.EncodeString(codecSelferCcUTF86628, `nodes`) r.WriteMapElemValue() if x.Nodes == nil { r.EncodeNil() @@ -2268,18 +1890,14 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 if false { } else { r.EncodeUint(uint64(x.CreatedIndex)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) + r.EncodeString(codecSelferCcUTF86628, `createdIndex`) r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 if false { } else { r.EncodeUint(uint64(x.CreatedIndex)) @@ -2287,18 +1905,14 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 if false { } else { r.EncodeUint(uint64(x.ModifiedIndex)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) + r.EncodeString(codecSelferCcUTF86628, `modifiedIndex`) r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 if false { } else { r.EncodeUint(uint64(x.ModifiedIndex)) @@ -2320,18 +1934,10 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { if x.Expiration == nil { r.EncodeNil() } else { - yym22 := z.EncBinary() - _ = yym22 + yy22 := *x.Expiration if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) } else { - z.EncFallback(x.Expiration) + r.EncodeTime(yy22) } } } else { @@ -2341,7 +1947,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[6] { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, 
string("expiration")) + r.EncodeString(codecSelferCcUTF86628, `expiration`) r.WriteMapElemValue() if yyn21 { r.EncodeNil() @@ -2349,18 +1955,10 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { if x.Expiration == nil { r.EncodeNil() } else { - yym24 := z.EncBinary() - _ = yym24 + yy24 := *x.Expiration if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) } else { - z.EncFallback(x.Expiration) + r.EncodeTime(yy24) } } } @@ -2369,8 +1967,6 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { if yyr2 || yy2arr2 { r.WriteArrayElem() if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 if false { } else { r.EncodeInt(int64(x.TTL)) @@ -2381,10 +1977,8 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } else { if yyq2[7] { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("ttl")) + r.EncodeString(codecSelferCcUTF86628, `ttl`) r.WriteMapElemValue() - yym28 := z.EncBinary() - _ = yym28 if false { } else { r.EncodeInt(int64(x.TTL)) @@ -2401,23 +1995,22 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 
{ r.ReadArrayEnd() @@ -2425,17 +2018,15 @@ func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -2448,114 +2039,65 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "key": if r.TryDecodeAsNil() { x.Key = "" } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } case "dir": if r.TryDecodeAsNil() { x.Dir = false } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } case "value": if r.TryDecodeAsNil() { x.Value = "" } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } case "nodes": if r.TryDecodeAsNil() { x.Nodes = nil } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) + x.Nodes.CodecDecodeSelf(d) } case "createdIndex": if r.TryDecodeAsNil() { x.CreatedIndex = 0 } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } + x.CreatedIndex = (uint64)(r.DecodeUint64()) } case "modifiedIndex": if r.TryDecodeAsNil() { x.ModifiedIndex = 0 } else { - yyv13 := &x.ModifiedIndex - 
yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } + x.ModifiedIndex = (uint64)(r.DecodeUint64()) } case "expiration": - if x.Expiration == nil { - x.Expiration = new(time.Time) - } if r.TryDecodeAsNil() { - if x.Expiration != nil { + if true && x.Expiration != nil { x.Expiration = nil } } else { if x.Expiration == nil { x.Expiration = new(time.Time) } - yym16 := z.DecBinary() - _ = yym16 + if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) } else { - z.DecFallback(x.Expiration, false) + *x.Expiration = r.DecodeTime() } } case "ttl": if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } + x.TTL = (int64)(r.DecodeInt64()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -2565,19 +2107,19 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2585,21 +2127,15 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } - 
yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2607,21 +2143,15 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Dir = false } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2629,21 +2159,15 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Value = "" } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2651,16 +2175,15 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Nodes = nil } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) + x.Nodes.CodecDecodeSelf(d) } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2668,21 +2191,15 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.CreatedIndex = 0 } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } + x.CreatedIndex = (uint64)(r.DecodeUint64()) } - yyj20++ - if yyhl20 { - yyb20 = 
yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2690,57 +2207,40 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.ModifiedIndex = 0 } else { - yyv30 := &x.ModifiedIndex - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } + x.ModifiedIndex = (uint64)(r.DecodeUint64()) } - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } r.ReadArrayElem() if r.TryDecodeAsNil() { - if x.Expiration != nil { + if true && x.Expiration != nil { x.Expiration = nil } } else { if x.Expiration == nil { x.Expiration = new(time.Time) } - yym33 := z.DecBinary() - _ = yym33 + if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) } else { - z.DecFallback(x.Expiration, false) + *x.Expiration = r.DecodeTime() } } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { r.ReadArrayEnd() return } @@ -2748,41 +2248,34 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } + x.TTL = (int64)(r.DecodeInt64()) } for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l } else { - yyb20 = 
r.CheckBreak() + yyb13 = r.CheckBreak() } - if yyb20 { + if yyb13 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj20-1, "") + z.DecStructFieldNotFound(yyj13-1, "") } r.ReadArrayEnd() } func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { h.encNodes((Nodes)(x), e) } @@ -2790,34 +2283,32 @@ func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { h.decNodes((*Nodes)(x), d) } } func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(0) } else { @@ -2833,23 +2324,22 @@ func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else 
if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -2857,17 +2347,15 @@ func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -2880,8 +2368,7 @@ func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { default: @@ -2892,7 +2379,7 @@ func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj4 int @@ -2915,21 +2402,20 @@ func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = 
yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(0) } else { @@ -2945,23 +2431,22 @@ func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -2969,17 +2454,15 @@ func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -2992,8 +2475,7 @@ func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := 
z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { default: @@ -3004,7 +2486,7 @@ func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj4 int @@ -3027,21 +2509,20 @@ func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(5) } else { @@ -3049,56 +2530,44 @@ func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.EncodeString(codecSelferCcUTF86628, `Prefix`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } else { r.WriteMapElemKey() - 
r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.EncodeString(codecSelferCcUTF86628, `Key`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { r.EncodeBool(bool(x.Recursive)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.EncodeString(codecSelferCcUTF86628, `Recursive`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { r.EncodeBool(bool(x.Recursive)) @@ -3106,18 +2575,14 @@ func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { } else { r.EncodeBool(bool(x.Sorted)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Sorted")) + r.EncodeString(codecSelferCcUTF86628, `Sorted`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { } else { r.EncodeBool(bool(x.Sorted)) @@ -3125,18 +2590,14 @@ func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 if false { } else { r.EncodeBool(bool(x.Quorum)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.EncodeString(codecSelferCcUTF86628, `Quorum`) r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 if false { } else { r.EncodeBool(bool(x.Quorum)) @@ -3152,23 +2613,22 @@ func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { 
+ z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -3176,17 +2636,15 @@ func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -3199,69 +2657,38 @@ func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "Prefix": if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } case "Key": if r.TryDecodeAsNil() { x.Key = "" } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } case "Recursive": if r.TryDecodeAsNil() { x.Recursive = false } else { - yyv8 := &x.Recursive - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(yyv8)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } case "Sorted": if r.TryDecodeAsNil() { x.Sorted = false } else { - 
yyv10 := &x.Sorted - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } + x.Sorted = (bool)(r.DecodeBool()) } case "Quorum": if r.TryDecodeAsNil() { x.Quorum = false } else { - yyv12 := &x.Quorum - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } + x.Quorum = (bool)(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -3271,19 +2698,19 @@ func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + if yyb9 { r.ReadArrayEnd() return } @@ -3291,21 +2718,15 @@ func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv15 := &x.Prefix - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + if yyb9 { r.ReadArrayEnd() return } @@ -3313,21 +2734,15 @@ func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - yyv17 := &x.Key - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + 
if yyb9 { r.ReadArrayEnd() return } @@ -3335,21 +2750,15 @@ func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Recursive = false } else { - yyv19 := &x.Recursive - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + if yyb9 { r.ReadArrayEnd() return } @@ -3357,21 +2766,15 @@ func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Sorted = false } else { - yyv21 := &x.Sorted - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(yyv21)) = r.DecodeBool() - } + x.Sorted = (bool)(r.DecodeBool()) } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + if yyb9 { r.ReadArrayEnd() return } @@ -3379,46 +2782,39 @@ func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Quorum = false } else { - yyv23 := &x.Quorum - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } + x.Quorum = (bool)(r.DecodeBool()) } for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb14 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb14 { + if yyb9 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj14-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } r.ReadArrayEnd() } func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := 
z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(4) } else { @@ -3426,56 +2822,44 @@ func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.EncodeString(codecSelferCcUTF86628, `Prefix`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.EncodeString(codecSelferCcUTF86628, `Key`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { r.EncodeUint(uint64(x.WaitIndex)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) + r.EncodeString(codecSelferCcUTF86628, `WaitIndex`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { r.EncodeUint(uint64(x.WaitIndex)) @@ -3483,18 +2867,14 @@ func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := 
z.EncBinary() - _ = yym13 if false { } else { r.EncodeBool(bool(x.Recursive)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.EncodeString(codecSelferCcUTF86628, `Recursive`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { } else { r.EncodeBool(bool(x.Recursive)) @@ -3510,23 +2890,22 @@ func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -3534,17 +2913,15 @@ func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -3557,57 +2934,32 @@ func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { 
case "Prefix": if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } case "Key": if r.TryDecodeAsNil() { x.Key = "" } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } case "WaitIndex": if r.TryDecodeAsNil() { x.WaitIndex = 0 } else { - yyv8 := &x.WaitIndex - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) - } + x.WaitIndex = (uint64)(r.DecodeUint64()) } case "Recursive": if r.TryDecodeAsNil() { x.Recursive = false } else { - yyv10 := &x.Recursive - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(yyv10)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -3617,19 +2969,19 @@ func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb12 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb12 { + if yyb8 { r.ReadArrayEnd() return } @@ -3637,21 +2989,15 @@ func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv13 := &x.Prefix - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb12 = r.CheckBreak() 
+ yyb8 = r.CheckBreak() } - if yyb12 { + if yyb8 { r.ReadArrayEnd() return } @@ -3659,21 +3005,15 @@ func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - yyv15 := &x.Key - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb12 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb12 { + if yyb8 { r.ReadArrayEnd() return } @@ -3681,21 +3021,15 @@ func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.WaitIndex = 0 } else { - yyv17 := &x.WaitIndex - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) - } + x.WaitIndex = (uint64)(r.DecodeUint64()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb12 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb12 { + if yyb8 { r.ReadArrayEnd() return } @@ -3703,46 +3037,39 @@ func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Recursive = false } else { - yyv19 := &x.Recursive - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l } else { - yyb12 = r.CheckBreak() + yyb8 = r.CheckBreak() } - if yyb12 { + if yyb8 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj8-1, "") } r.ReadArrayEnd() } func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if 
z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(10) } else { @@ -3750,94 +3077,74 @@ func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.EncodeString(codecSelferCcUTF86628, `Prefix`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.EncodeString(codecSelferCcUTF86628, `Key`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, string(x.Value)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.EncodeString(codecSelferCcUTF86628, `Value`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF87612, 
string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, string(x.Value)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.EncodeString(codecSelferCcUTF86628, `PrevValue`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.EncodeString(codecSelferCcUTF86628, `PrevIndex`) r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) @@ -3848,45 +3155,39 @@ func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { x.PrevExist.CodecEncodeSelf(e) } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.EncodeString(codecSelferCcUTF86628, `PrevExist`) r.WriteMapElemValue() x.PrevExist.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym22 := z.EncBinary() - _ = yym22 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt22 := z.Extension(z.I2Rtid(x.TTL)); yyxt22 != nil { + z.EncExtension(x.TTL, yyxt22) } else { r.EncodeInt(int64(x.TTL)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.EncodeString(codecSelferCcUTF86628, `TTL`) r.WriteMapElemValue() - yym23 := z.EncBinary() - _ = yym23 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt23 := z.Extension(z.I2Rtid(x.TTL)); yyxt23 != nil { + z.EncExtension(x.TTL, yyxt23) } else { 
r.EncodeInt(int64(x.TTL)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym25 := z.EncBinary() - _ = yym25 if false { } else { r.EncodeBool(bool(x.Refresh)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.EncodeString(codecSelferCcUTF86628, `Refresh`) r.WriteMapElemValue() - yym26 := z.EncBinary() - _ = yym26 if false { } else { r.EncodeBool(bool(x.Refresh)) @@ -3894,18 +3195,14 @@ func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym28 := z.EncBinary() - _ = yym28 if false { } else { r.EncodeBool(bool(x.Dir)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.EncodeString(codecSelferCcUTF86628, `Dir`) r.WriteMapElemValue() - yym29 := z.EncBinary() - _ = yym29 if false { } else { r.EncodeBool(bool(x.Dir)) @@ -3913,18 +3210,14 @@ func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym31 := z.EncBinary() - _ = yym31 if false { } else { r.EncodeBool(bool(x.NoValueOnSuccess)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.EncodeString(codecSelferCcUTF86628, `NoValueOnSuccess`) r.WriteMapElemValue() - yym32 := z.EncBinary() - _ = yym32 if false { } else { r.EncodeBool(bool(x.NoValueOnSuccess)) @@ -3940,23 +3233,22 @@ func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { 
x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -3964,17 +3256,15 @@ func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -3987,125 +3277,73 @@ func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "Prefix": if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } case "Key": if r.TryDecodeAsNil() { x.Key = "" } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } case "Value": if r.TryDecodeAsNil() { x.Value = "" } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } case "PrevValue": if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv10 := &x.PrevValue - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*string)(yyv10)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } case "PrevIndex": if r.TryDecodeAsNil() { 
x.PrevIndex = 0 } else { - yyv12 := &x.PrevIndex - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) - } + x.PrevIndex = (uint64)(r.DecodeUint64()) } case "PrevExist": if r.TryDecodeAsNil() { x.PrevExist = "" } else { - yyv14 := &x.PrevExist - yyv14.CodecDecodeSelf(d) + x.PrevExist.CodecDecodeSelf(d) } case "TTL": if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv15 := &x.TTL - yym16 := z.DecBinary() - _ = yym16 if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yyxt11 := z.Extension(z.I2Rtid(x.TTL)); yyxt11 != nil { + z.DecExtension(x.TTL, yyxt11) } else { - *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } case "Refresh": if r.TryDecodeAsNil() { x.Refresh = false } else { - yyv17 := &x.Refresh - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(yyv17)) = r.DecodeBool() - } + x.Refresh = (bool)(r.DecodeBool()) } case "Dir": if r.TryDecodeAsNil() { x.Dir = false } else { - yyv19 := &x.Dir - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*bool)(yyv19)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } case "NoValueOnSuccess": if r.TryDecodeAsNil() { x.NoValueOnSuccess = false } else { - yyv21 := &x.NoValueOnSuccess - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(yyv21)) = r.DecodeBool() - } + x.NoValueOnSuccess = (bool)(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -4115,19 +3353,19 @@ func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = 
r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4135,21 +3373,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv24 := &x.Prefix - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*string)(yyv24)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4157,21 +3389,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - yyv26 := &x.Key - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - *((*string)(yyv26)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4179,21 +3405,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Value = "" } else { - yyv28 := &x.Value - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*string)(yyv28)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4201,21 +3421,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv30 := &x.PrevValue - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - *((*string)(yyv30)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > 
l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4223,21 +3437,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevIndex = 0 } else { - yyv32 := &x.PrevIndex - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) - } + x.PrevIndex = (uint64)(r.DecodeUint64()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4245,16 +3453,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevExist = "" } else { - yyv34 := &x.PrevExist - yyv34.CodecDecodeSelf(d) + x.PrevExist.CodecDecodeSelf(d) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4262,22 +3469,20 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 if false { - } else if z.HasExtensions() && z.DecExt(yyv35) { + } else if yyxt23 := z.Extension(z.I2Rtid(x.TTL)); yyxt23 != nil { + z.DecExtension(x.TTL, yyxt23) } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4285,21 +3490,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Refresh = false } else { - yyv37 := &x.Refresh - yym38 := z.DecBinary() - _ = 
yym38 - if false { - } else { - *((*bool)(yyv37)) = r.DecodeBool() - } + x.Refresh = (bool)(r.DecodeBool()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4307,21 +3506,15 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Dir = false } else { - yyv39 := &x.Dir - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - *((*bool)(yyv39)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { r.ReadArrayEnd() return } @@ -4329,46 +3522,39 @@ func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.NoValueOnSuccess = false } else { - yyv41 := &x.NoValueOnSuccess - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - *((*bool)(yyv41)) = r.DecodeBool() - } + x.NoValueOnSuccess = (bool)(r.DecodeBool()) } for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l } else { - yyb23 = r.CheckBreak() + yyb15 = r.CheckBreak() } - if yyb23 { + if yyb15 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj23-1, "") + z.DecStructFieldNotFound(yyj15-1, "") } r.ReadArrayEnd() } func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = 
false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(6) } else { @@ -4376,75 +3562,59 @@ func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.EncodeString(codecSelferCcUTF86628, `Prefix`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.EncodeString(codecSelferCcUTF86628, `Key`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + r.EncodeString(codecSelferCcUTF86628, string(x.Key)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.EncodeString(codecSelferCcUTF86628, `PrevValue`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + r.EncodeString(codecSelferCcUTF86628, string(x.PrevValue)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) } } else { r.WriteMapElemKey() - 
r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.EncodeString(codecSelferCcUTF86628, `PrevIndex`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { } else { r.EncodeUint(uint64(x.PrevIndex)) @@ -4452,18 +3622,14 @@ func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym16 := z.EncBinary() - _ = yym16 if false { } else { r.EncodeBool(bool(x.Dir)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.EncodeString(codecSelferCcUTF86628, `Dir`) r.WriteMapElemValue() - yym17 := z.EncBinary() - _ = yym17 if false { } else { r.EncodeBool(bool(x.Dir)) @@ -4471,18 +3637,14 @@ func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym19 := z.EncBinary() - _ = yym19 if false { } else { r.EncodeBool(bool(x.Recursive)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.EncodeString(codecSelferCcUTF86628, `Recursive`) r.WriteMapElemValue() - yym20 := z.EncBinary() - _ = yym20 if false { } else { r.EncodeBool(bool(x.Recursive)) @@ -4498,23 +3660,22 @@ func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -4522,17 +3683,15 @@ 
func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -4545,81 +3704,44 @@ func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "Prefix": if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } case "Key": if r.TryDecodeAsNil() { x.Key = "" } else { - yyv6 := &x.Key - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } case "PrevValue": if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv8 := &x.PrevValue - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } case "PrevIndex": if r.TryDecodeAsNil() { x.PrevIndex = 0 } else { - yyv10 := &x.PrevIndex - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) - } + x.PrevIndex = (uint64)(r.DecodeUint64()) } case "Dir": if r.TryDecodeAsNil() { x.Dir = false } else { - yyv12 := &x.Dir - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(yyv12)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } case "Recursive": if 
r.TryDecodeAsNil() { x.Recursive = false } else { - yyv14 := &x.Recursive - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*bool)(yyv14)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } default: z.DecStructFieldNotFound(-1, yys3) @@ -4629,19 +3751,19 @@ func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4649,21 +3771,15 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv17 := &x.Prefix - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4671,21 +3787,15 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Key = "" } else { - yyv19 := &x.Key - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*string)(yyv19)) = r.DecodeString() - } + x.Key = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4693,21 +3803,15 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) 
{ if r.TryDecodeAsNil() { x.PrevValue = "" } else { - yyv21 := &x.PrevValue - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } + x.PrevValue = (string)(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4715,21 +3819,15 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.PrevIndex = 0 } else { - yyv23 := &x.PrevIndex - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) - } + x.PrevIndex = (uint64)(r.DecodeUint64()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4737,21 +3835,15 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Dir = false } else { - yyv25 := &x.Dir - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*bool)(yyv25)) = r.DecodeBool() - } + x.Dir = (bool)(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { r.ReadArrayEnd() return } @@ -4759,46 +3851,39 @@ func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if r.TryDecodeAsNil() { x.Recursive = false } else { - yyv27 := &x.Recursive - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*bool)(yyv27)) = r.DecodeBool() - } + x.Recursive = (bool)(r.DecodeBool()) } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l } else { - yyb16 = r.CheckBreak() + yyb10 = r.CheckBreak() } - if yyb16 { + if yyb10 { break } r.ReadArrayElem() 
- z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj10-1, "") } r.ReadArrayEnd() } func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { r.EncodeNil() } else { - yym1 := z.EncBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.EncExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.EncExtension(x, yyxt1) } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray _, _ = yysep2, yy2arr2 - const yyr2 bool = false + const yyr2 bool = false // struct tag has 'toArray' if yyr2 || yy2arr2 { r.WriteArrayStart(4) } else { @@ -4806,78 +3891,64 @@ func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym4 := z.EncBinary() - _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.EncodeString(codecSelferCcUTF86628, `Prefix`) r.WriteMapElemValue() - yym5 := z.EncBinary() - _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + r.EncodeString(codecSelferCcUTF86628, string(x.Prefix)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym7 := z.EncBinary() - _ = yym7 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + r.EncodeString(codecSelferCcUTF86628, string(x.Dir)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.EncodeString(codecSelferCcUTF86628, `Dir`) r.WriteMapElemValue() - yym8 := z.EncBinary() - _ = yym8 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + r.EncodeString(codecSelferCcUTF86628, string(x.Dir)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym10 := z.EncBinary() - _ = yym10 if false { } else { - 
r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, string(x.Value)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.EncodeString(codecSelferCcUTF86628, `Value`) r.WriteMapElemValue() - yym11 := z.EncBinary() - _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + r.EncodeString(codecSelferCcUTF86628, string(x.Value)) } } if yyr2 || yy2arr2 { r.WriteArrayElem() - yym13 := z.EncBinary() - _ = yym13 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt13 := z.Extension(z.I2Rtid(x.TTL)); yyxt13 != nil { + z.EncExtension(x.TTL, yyxt13) } else { r.EncodeInt(int64(x.TTL)) } } else { r.WriteMapElemKey() - r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.EncodeString(codecSelferCcUTF86628, `TTL`) r.WriteMapElemValue() - yym14 := z.EncBinary() - _ = yym14 if false { - } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else if yyxt14 := z.Extension(z.I2Rtid(x.TTL)); yyxt14 != nil { + z.EncExtension(x.TTL, yyxt14) } else { r.EncodeInt(int64(x.TTL)) } @@ -4892,23 +3963,22 @@ func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 if false { - } else if z.HasExtensions() && z.DecExt(x) { + } else if yyxt1 := z.Extension(z.I2Rtid(x)); yyxt1 != nil { + z.DecExtension(x, yyxt1) } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap7612 { + if yyct2 == codecSelferValueTypeMap6628 { yyl2 := r.ReadMapStart() if yyl2 == 0 { r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray7612 { + } else if yyct2 == codecSelferValueTypeArray6628 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { r.ReadArrayEnd() @@ -4916,17 +3986,15 @@ func (x 
*createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct6628) } } } func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc var yyhl3 bool = l >= 0 for yyj3 := 0; ; yyj3++ { if yyhl3 { @@ -4939,57 +4007,36 @@ func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } r.ReadMapElemKey() - yys3Slc = r.DecodeStringAsBytes() - yys3 := string(yys3Slc) + yys3 := z.StringView(r.DecodeStringAsBytes()) r.ReadMapElemValue() switch yys3 { case "Prefix": if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv4 := &x.Prefix - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } case "Dir": if r.TryDecodeAsNil() { x.Dir = "" } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*string)(yyv6)) = r.DecodeString() - } + x.Dir = (string)(r.DecodeString()) } case "Value": if r.TryDecodeAsNil() { x.Value = "" } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } case "TTL": if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv10 := &x.TTL - yym11 := z.DecBinary() - _ = yym11 if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { + } else if yyxt8 := z.Extension(z.I2Rtid(x.TTL)); yyxt8 != nil { + z.DecExtension(x.TTL, yyxt8) } else { - *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } default: @@ -5000,19 +4047,19 @@ func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } func (x 
*createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer7612 + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { r.ReadArrayEnd() return } @@ -5020,21 +4067,15 @@ func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Prefix = "" } else { - yyv13 := &x.Prefix - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*string)(yyv13)) = r.DecodeString() - } + x.Prefix = (string)(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { r.ReadArrayEnd() return } @@ -5042,21 +4083,15 @@ func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Dir = "" } else { - yyv15 := &x.Dir - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*string)(yyv15)) = r.DecodeString() - } + x.Dir = (string)(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { r.ReadArrayEnd() return } @@ -5064,21 +4099,15 @@ func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.Value = "" } else { - yyv17 := &x.Value - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*string)(yyv17)) = r.DecodeString() - } + x.Value = (string)(r.DecodeString()) } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if 
yyb9 { r.ReadArrayEnd() return } @@ -5086,33 +4115,31 @@ func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.TTL = 0 } else { - yyv19 := &x.TTL - yym20 := z.DecBinary() - _ = yym20 if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yyxt14 := z.Extension(z.I2Rtid(x.TTL)); yyxt14 != nil { + z.DecExtension(x.TTL, yyxt14) } else { - *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + x.TTL = (time.Duration)(r.DecodeInt64()) } } for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb12 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb12 { + if yyb9 { break } r.ReadArrayElem() - z.DecStructFieldNotFound(yyj12-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } r.ReadArrayEnd() } -func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer7612 +func (x codecSelfer6628) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer6628 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.WriteArrayStart(len(v)) @@ -5127,8 +4154,8 @@ func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { r.WriteArrayEnd() } -func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer7612 +func (x codecSelfer6628) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer6628 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -5165,7 +4192,7 @@ func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { var yyj1 int // var yydn1 bool for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { - if yyj1 == 0 && len(yyv1) == 0 { + if yyj1 == 0 && yyv1 == nil { if yyhl1 { yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) } else { @@ -5175,9 +4202,7 @@ func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { yyc1 = true } yyh1.ElemContainerState(yyj1) - // yydn1 = r.TryDecodeAsNil() - // if indefinite, etc, then expand the slice if necessary var yydb1 bool 
if yyj1 >= len(yyv1) { yyv1 = append(yyv1, nil) @@ -5188,15 +4213,12 @@ func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { z.DecSwallow() } else { if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } + yyv1[yyj1] = nil } else { if yyv1[yyj1] == nil { yyv1[yyj1] = new(Node) } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) + yyv1[yyj1].CodecDecodeSelf(d) } } @@ -5214,5 +4236,4 @@ func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { if yyc1 { *v = yyv1 } - } diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index 6ffe48b0..02389203 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -19,8 +19,8 @@ import ( "fmt" "strings" + "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth/authpb" "google.golang.org/grpc" ) @@ -100,70 +100,70 @@ type Auth interface { RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) } -type auth struct { +type authClient struct { remote pb.AuthClient callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - api := &auth{remote: RetryAuthClient(c)} + api := &authClient{remote: RetryAuthClient(c)} if c != nil { api.callOpts = c.callOpts } return api } -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) return (*AuthEnableResponse)(resp), toErr(ctx, err) } -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) 
return (*AuthDisableResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) 
return (*AuthUserGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { perm := &authpb.Permission{ Key: []byte(key), RangeEnd: []byte(rangeEnd), @@ -173,22 +173,22 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) 
return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/doc.go new file mode 100644 index 00000000..45af5e9d --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/balancer/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package balancer implements client balancer. +package balancer diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/grpc1.7-health.go similarity index 72% rename from vendor/github.com/coreos/etcd/clientv3/health_balancer.go rename to vendor/github.com/coreos/etcd/clientv3/balancer/grpc1.7-health.go index 5918cba8..7d24b93f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go +++ b/vendor/github.com/coreos/etcd/clientv3/balancer/grpc1.7-health.go @@ -1,4 +1,4 @@ -// Copyright 2017 The etcd Authors +// Copyright 2018 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package clientv3 +package balancer import ( "context" "errors" + "io/ioutil" "net/url" "strings" "sync" @@ -24,10 +25,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" ) +// TODO: replace with something better +var lg = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) + const ( minHealthRetryDuration = 3 * time.Second unknownService = "unknown service grpc.health.v1.Health" @@ -38,18 +43,16 @@ const ( // This error is returned only when opts.BlockingWait is true. 
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") -type healthCheckFunc func(ep string) (bool, error) - -type notifyMsg int +type NotifyMsg int const ( - notifyReset notifyMsg = iota - notifyNext + NotifyReset NotifyMsg = iota + NotifyNext ) -// healthBalancer does the bare minimum to expose multiple eps +// GRPC17Health does the bare minimum to expose multiple eps // to the grpc reconnection code path -type healthBalancer struct { +type GRPC17Health struct { // addrs are the client's endpoint addresses for grpc addrs []grpc.Address @@ -64,7 +67,7 @@ type healthBalancer struct { readyOnce sync.Once // healthCheck checks an endpoint's health. - healthCheck healthCheckFunc + healthCheck func(ep string) (bool, error) healthCheckTimeout time.Duration unhealthyMu sync.RWMutex @@ -88,7 +91,7 @@ type healthBalancer struct { donec chan struct{} // updateAddrsC notifies updateNotifyLoop to update addrs. - updateAddrsC chan notifyMsg + updateAddrsC chan NotifyMsg // grpc issues TLS cert checks using the string passed into dial so // that string must be the host. To recover the full scheme://host URL, @@ -102,21 +105,29 @@ type healthBalancer struct { closed bool } -func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { +// DialFunc defines gRPC dial function. +type DialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) + +// NewGRPC17Health returns a new health balancer with gRPC v1.7. 
+func NewGRPC17Health( + eps []string, + timeout time.Duration, + dialFunc DialFunc, +) *GRPC17Health { notifyCh := make(chan []grpc.Address) addrs := eps2addrs(eps) - hb := &healthBalancer{ + hb := &GRPC17Health{ addrs: addrs, eps: eps, notifyCh: notifyCh, readyc: make(chan struct{}), - healthCheck: hc, + healthCheck: func(ep string) (bool, error) { return grpcHealthCheck(ep, dialFunc) }, unhealthyHostPorts: make(map[string]time.Time), upc: make(chan struct{}), stopc: make(chan struct{}), downc: make(chan struct{}), donec: make(chan struct{}), - updateAddrsC: make(chan notifyMsg), + updateAddrsC: make(chan NotifyMsg), hostPort2ep: getHostPort2ep(eps), } if timeout < minHealthRetryDuration { @@ -134,78 +145,81 @@ func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) return hb } -func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } +func (b *GRPC17Health) Start(target string, config grpc.BalancerConfig) error { return nil } -func (b *healthBalancer) ConnectNotify() <-chan struct{} { +func (b *GRPC17Health) ConnectNotify() <-chan struct{} { b.mu.Lock() defer b.mu.Unlock() return b.upc } -func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } +func (b *GRPC17Health) UpdateAddrsC() chan NotifyMsg { return b.updateAddrsC } +func (b *GRPC17Health) StopC() chan struct{} { return b.stopc } -func (b *healthBalancer) endpoint(hostPort string) string { +func (b *GRPC17Health) Ready() <-chan struct{} { return b.readyc } + +func (b *GRPC17Health) Endpoint(hostPort string) string { b.mu.RLock() defer b.mu.RUnlock() return b.hostPort2ep[hostPort] } -func (b *healthBalancer) pinned() string { +func (b *GRPC17Health) Pinned() string { b.mu.RLock() defer b.mu.RUnlock() return b.pinAddr } -func (b *healthBalancer) hostPortError(hostPort string, err error) { - if b.endpoint(hostPort) == "" { - logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) 
+func (b *GRPC17Health) HostPortError(hostPort string, err error) { + if b.Endpoint(hostPort) == "" { + lg.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) return } b.unhealthyMu.Lock() b.unhealthyHostPorts[hostPort] = time.Now() b.unhealthyMu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) + lg.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error()) } -func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { - if b.endpoint(hostPort) == "" { - logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) +func (b *GRPC17Health) removeUnhealthy(hostPort, msg string) { + if b.Endpoint(hostPort) == "" { + lg.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) return } b.unhealthyMu.Lock() delete(b.unhealthyHostPorts, hostPort) b.unhealthyMu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + lg.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) } -func (b *healthBalancer) countUnhealthy() (count int) { +func (b *GRPC17Health) countUnhealthy() (count int) { b.unhealthyMu.RLock() count = len(b.unhealthyHostPorts) b.unhealthyMu.RUnlock() return count } -func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { +func (b *GRPC17Health) isUnhealthy(hostPort string) (unhealthy bool) { b.unhealthyMu.RLock() _, unhealthy = b.unhealthyHostPorts[hostPort] b.unhealthyMu.RUnlock() return unhealthy } -func (b *healthBalancer) cleanupUnhealthy() { +func (b *GRPC17Health) cleanupUnhealthy() { b.unhealthyMu.Lock() for k, v := range b.unhealthyHostPorts { if time.Since(v) > b.healthCheckTimeout { delete(b.unhealthyHostPorts, k) - logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + lg.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, 
b.healthCheckTimeout) } } b.unhealthyMu.Unlock() } -func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { +func (b *GRPC17Health) liveAddrs() ([]grpc.Address, map[string]struct{}) { unhealthyCnt := b.countUnhealthy() b.mu.RLock() @@ -231,15 +245,15 @@ func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { return addrs, liveHostPorts } -func (b *healthBalancer) updateUnhealthy() { +func (b *GRPC17Health) updateUnhealthy() { for { select { case <-time.After(b.healthCheckTimeout): b.cleanupUnhealthy() - pinned := b.pinned() + pinned := b.Pinned() if pinned == "" || b.isUnhealthy(pinned) { select { - case b.updateAddrsC <- notifyNext: + case b.updateAddrsC <- NotifyNext: case <-b.stopc: return } @@ -250,7 +264,19 @@ func (b *healthBalancer) updateUnhealthy() { } } -func (b *healthBalancer) updateAddrs(eps ...string) { +// NeedUpdate returns true if all connections are down or +// addresses do not include current pinned address. +func (b *GRPC17Health) NeedUpdate() bool { + // updating notifyCh can trigger new connections, + // need update addrs if all connections are down + // or addrs does not include pinAddr. 
+ b.mu.RLock() + update := !hasAddr(b.addrs, b.pinAddr) + b.mu.RUnlock() + return update +} + +func (b *GRPC17Health) UpdateAddrs(eps ...string) { np := getHostPort2ep(eps) b.mu.Lock() @@ -278,12 +304,12 @@ func (b *healthBalancer) updateAddrs(eps ...string) { b.unhealthyMu.Unlock() } -func (b *healthBalancer) next() { +func (b *GRPC17Health) Next() { b.mu.RLock() downc := b.downc b.mu.RUnlock() select { - case b.updateAddrsC <- notifyNext: + case b.updateAddrsC <- NotifyNext: case <-b.stopc: } // wait until disconnect so new RPCs are not issued on old connection @@ -293,7 +319,7 @@ func (b *healthBalancer) next() { } } -func (b *healthBalancer) updateNotifyLoop() { +func (b *GRPC17Health) updateNotifyLoop() { defer close(b.donec) for { @@ -320,7 +346,7 @@ func (b *healthBalancer) updateNotifyLoop() { default: } case downc == nil: - b.notifyAddrs(notifyReset) + b.notifyAddrs(NotifyReset) select { case <-upc: case msg := <-b.updateAddrsC: @@ -338,7 +364,7 @@ func (b *healthBalancer) updateNotifyLoop() { } select { case <-downc: - b.notifyAddrs(notifyReset) + b.notifyAddrs(NotifyReset) case msg := <-b.updateAddrsC: b.notifyAddrs(msg) case <-b.stopc: @@ -348,8 +374,8 @@ func (b *healthBalancer) updateNotifyLoop() { } } -func (b *healthBalancer) notifyAddrs(msg notifyMsg) { - if msg == notifyNext { +func (b *GRPC17Health) notifyAddrs(msg NotifyMsg) { + if msg == NotifyNext { select { case b.notifyCh <- []grpc.Address{}: case <-b.stopc: @@ -380,7 +406,7 @@ func (b *healthBalancer) notifyAddrs(msg notifyMsg) { } } -func (b *healthBalancer) Up(addr grpc.Address) func(error) { +func (b *GRPC17Health) Up(addr grpc.Address) func(error) { if !b.mayPin(addr) { return func(err error) {} } @@ -402,7 +428,7 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) { } if b.pinAddr != "" { - logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + lg.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", 
addr.Addr, b.pinAddr) return func(err error) {} } @@ -410,7 +436,7 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) { close(b.upc) b.downc = make(chan struct{}) b.pinAddr = addr.Addr - logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr) + lg.Infof("clientv3/balancer: pin %q", addr.Addr) // notify client that a connection is up b.readyOnce.Do(func() { close(b.readyc) }) @@ -420,19 +446,19 @@ func (b *healthBalancer) Up(addr grpc.Address) func(error) { // timeout will induce a network I/O error, and retrying until success; // finding healthy endpoint on retry could take several timeouts and redials. // To avoid wasting retries, gray-list unhealthy endpoints. - b.hostPortError(addr.Addr, err) + b.HostPortError(addr.Addr, err) b.mu.Lock() b.upc = make(chan struct{}) close(b.downc) b.pinAddr = "" b.mu.Unlock() - logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + lg.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) } } -func (b *healthBalancer) mayPin(addr grpc.Address) bool { - if b.endpoint(addr.Addr) == "" { // stale host:port +func (b *GRPC17Health) mayPin(addr grpc.Address) bool { + if b.Endpoint(addr.Addr) == "" { // stale host:port return false } @@ -454,7 +480,7 @@ func (b *healthBalancer) mayPin(addr grpc.Address) bool { // 3. 
grpc-healthcheck still SERVING, thus retry to pin // instead, return before grpc-healthcheck if failed within healthcheck timeout if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { - logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + lg.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) return false } @@ -463,11 +489,11 @@ func (b *healthBalancer) mayPin(addr grpc.Address) bool { return true } - b.hostPortError(addr.Addr, errors.New("health check failed")) + b.HostPortError(addr.Addr, errors.New("health check failed")) return false } -func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { +func (b *GRPC17Health) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { var ( addr string closed bool @@ -515,9 +541,9 @@ func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) return grpc.Address{Addr: addr}, func() {}, nil } -func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } +func (b *GRPC17Health) Notify() <-chan []grpc.Address { return b.notifyCh } -func (b *healthBalancer) Close() error { +func (b *GRPC17Health) Close() error { b.mu.Lock() // In case gRPC calls close twice. TODO: remove the checking // when we are sure that gRPC wont call close twice. 
@@ -553,8 +579,8 @@ func (b *healthBalancer) Close() error { return nil } -func grpcHealthCheck(client *Client, ep string) (bool, error) { - conn, err := client.dial(ep) +func grpcHealthCheck(ep string, dialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error)) (bool, error) { + conn, err := dialFunc(ep) if err != nil { return false, err } @@ -607,3 +633,25 @@ func getHostPort2ep(eps []string) map[string]string { } return hm } + +func parseEndpoint(endpoint string) (proto string, host string, scheme string) { + proto = "tcp" + host = endpoint + url, uerr := url.Parse(endpoint) + if uerr != nil || !strings.Contains(endpoint, "://") { + return proto, host, scheme + } + scheme = url.Scheme + + // strip scheme:// prefix since grpc dials by host + host = url.Host + switch url.Scheme { + case "http", "https": + case "unix", "unixs": + proto = "unix" + host = url.Host + url.Path + default: + proto, host = "", "" + } + return proto, host, scheme +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index 68540108..95afd1f6 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -26,6 +26,7 @@ import ( "sync" "time" + "github.com/coreos/etcd/clientv3/balancer" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "google.golang.org/grpc" @@ -55,7 +56,7 @@ type Client struct { cfg Config creds *credentials.TransportCredentials - balancer *healthBalancer + balancer *balancer.GRPC17Health mu *sync.Mutex ctx context.Context @@ -93,6 +94,11 @@ func NewFromURL(url string) (*Client, error) { return New(Config{Endpoints: []string{url}}) } +// NewFromURLs creates a new etcdv3 client from URLs. +func NewFromURLs(urls []string) (*Client, error) { + return New(Config{Endpoints: urls}) +} + // Close shuts down the client's etcd connections. 
func (c *Client) Close() error { c.cancel() @@ -122,18 +128,12 @@ func (c *Client) SetEndpoints(eps ...string) { c.mu.Lock() c.cfg.Endpoints = eps c.mu.Unlock() - c.balancer.updateAddrs(eps...) + c.balancer.UpdateAddrs(eps...) - // updating notifyCh can trigger new connections, - // need update addrs if all connections are down - // or addrs does not include pinAddr. - c.balancer.mu.RLock() - update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) - c.balancer.mu.RUnlock() - if update { + if c.balancer.NeedUpdate() { select { - case c.balancer.updateAddrsC <- notifyNext: - case <-c.balancer.stopc: + case c.balancer.UpdateAddrsC() <- balancer.NotifyNext: + case <-c.balancer.StopC(): } } } @@ -166,7 +166,7 @@ func (c *Client) autoSync() { err := c.Sync(ctx) cancel() if err != nil && err != c.ctx.Err() { - logger.Println("Auto sync endpoints failed:", err) + lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err) } } } @@ -185,7 +185,7 @@ func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...str cred.tokenMu.RLock() defer cred.tokenMu.RUnlock() return map[string]string{ - "token": cred.token, + rpctypes.TokenFieldNameGRPC: cred.token, }, nil } @@ -245,7 +245,7 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts opts = append(opts, dopts...) 
f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + proto, host, _ := parseEndpoint(c.balancer.Endpoint(host)) if host == "" && endpoint != "" { // dialing an endpoint not in the balancer; use // endpoint passed into dial @@ -412,9 +412,7 @@ func newClient(cfg *Config) (*Client, error) { client.callOpts = callOpts } - client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { - return grpcHealthCheck(client, ep) - }) + client.balancer = balancer.NewGRPC17Health(cfg.Endpoints, cfg.DialTimeout, client.dial) // use Endpoints[0] so that for https:// without any tls config given, then // grpc will assume the certificate server name is the endpoint host. @@ -431,7 +429,7 @@ func newClient(cfg *Config) (*Client, error) { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.ready(): + case <-client.balancer.Ready(): hasConn = true case <-ctx.Done(): case <-waitc: @@ -561,3 +559,11 @@ func canceledByCaller(stopCtx context.Context, err error) bool { return err == context.Canceled || err == context.DeadlineExceeded } + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 782e3131..3276372a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -18,28 +18,14 @@ import ( "io/ioutil" "sync" + "github.com/coreos/etcd/pkg/logutil" + "google.golang.org/grpc/grpclog" ) -// Logger is the logger used by client library. -// It implements grpclog.LoggerV2 interface. -type Logger interface { - grpclog.LoggerV2 - - // Lvl returns logger if logger's verbosity level >= "lvl". - // Otherwise, logger that discards all logs. 
- Lvl(lvl int) Logger - - // to satisfy capnslog - - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - var ( - loggerMu sync.RWMutex - logger Logger + lgMu sync.RWMutex + lg logutil.Logger ) type settableLogger struct { @@ -49,29 +35,29 @@ type settableLogger struct { func init() { // disable client side logs by default - logger = &settableLogger{} + lg = &settableLogger{} SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) } // SetLogger sets client-side Logger. func SetLogger(l grpclog.LoggerV2) { - loggerMu.Lock() - logger = NewLogger(l) + lgMu.Lock() + lg = logutil.NewLogger(l) // override grpclog so that any changes happen with locking - grpclog.SetLoggerV2(logger) - loggerMu.Unlock() + grpclog.SetLoggerV2(lg) + lgMu.Unlock() } -// GetLogger returns the current logger. -func GetLogger() Logger { - loggerMu.RLock() - l := logger - loggerMu.RUnlock() +// GetLogger returns the current logutil.Logger. +func GetLogger() logutil.Logger { + lgMu.RLock() + l := lg + lgMu.RUnlock() return l } -// NewLogger returns a new Logger with grpclog.LoggerV2. -func NewLogger(gl grpclog.LoggerV2) Logger { +// NewLogger returns a new Logger with logutil.Logger. +func NewLogger(gl grpclog.LoggerV2) logutil.Logger { return &settableLogger{l: gl} } @@ -104,32 +90,12 @@ func (s *settableLogger) Print(args ...interface{}) { s.get().In func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) 
} func (s *settableLogger) V(l int) bool { return s.get().V(l) } -func (s *settableLogger) Lvl(lvl int) Logger { +func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 { s.mu.RLock() l := s.l s.mu.RUnlock() if l.V(lvl) { return s } - return &noLogger{} + return logutil.NewDiscardLogger() } - -type noLogger struct{} - -func (*noLogger) Info(args ...interface{}) {} -func (*noLogger) Infof(format string, args ...interface{}) {} -func (*noLogger) Infoln(args ...interface{}) {} -func (*noLogger) Warning(args ...interface{}) {} -func (*noLogger) Warningf(format string, args ...interface{}) {} -func (*noLogger) Warningln(args ...interface{}) {} -func (*noLogger) Error(args ...interface{}) {} -func (*noLogger) Errorf(format string, args ...interface{}) {} -func (*noLogger) Errorln(args ...interface{}) {} -func (*noLogger) Fatal(args ...interface{}) {} -func (*noLogger) Fatalf(format string, args ...interface{}) {} -func (*noLogger) Fatalln(args ...interface{}) {} -func (*noLogger) Print(args ...interface{}) {} -func (*noLogger) Printf(format string, args ...interface{}) {} -func (*noLogger) Println(args ...interface{}) {} -func (*noLogger) V(l int) bool { return false } -func (ng *noLogger) Lvl(lvl int) Logger { return ng } diff --git a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go b/vendor/github.com/coreos/etcd/clientv3/options.go similarity index 96% rename from vendor/github.com/coreos/etcd/clientv3/grpc_options.go rename to vendor/github.com/coreos/etcd/clientv3/options.go index 592dd699..fa25811f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go +++ b/vendor/github.com/coreos/etcd/clientv3/options.go @@ -44,3 +44,6 @@ var ( // Some options are exposed to "clientv3.Config". // Defaults will be overridden by the settings in "clientv3.Config". 
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} + +// MaxLeaseTTL is the maximum lease TTL value +const MaxLeaseTTL = 9000000000 diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index f923f74b..6226d787 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -91,18 +91,18 @@ func (c *Client) newRetryWrapper() retryRPCFunc { if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { return err } - pinned := c.balancer.pinned() + pinned := c.balancer.Pinned() err := f(rpcCtx) if err == nil { return nil } - logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) + lg.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { // mark this before endpoint switch is triggered - c.balancer.hostPortError(pinned, err) - c.balancer.next() - logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + c.balancer.HostPortError(pinned, err) + c.balancer.Next() + lg.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) } if isStop(err) { @@ -115,17 +115,17 @@ func (c *Client) newRetryWrapper() retryRPCFunc { func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error { for { - pinned := c.balancer.pinned() + pinned := c.balancer.Pinned() err := retryf(rpcCtx, f, rp) if err == nil { return nil } - logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + lg.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) // always stop retry on etcd errors other than 
invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { - logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + lg.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) return err // return the original error for simplicity } continue diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 9452d0d9..312845cb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,7 +22,7 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/internal/mvcc/mvccpb" + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go index 9b73a3d4..7cdd0dfd 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -17,7 +17,8 @@ package api import ( "sync" - "github.com/coreos/etcd/internal/version" + "github.com/coreos/etcd/version" + "go.uber.org/zap" "github.com/coreos/go-semver/semver" "github.com/coreos/pkg/capnslog" @@ -56,7 +57,7 @@ func init() { } // UpdateCapability updates the enabledMap when the cluster version increases. 
-func UpdateCapability(v *semver.Version) { +func UpdateCapability(lg *zap.Logger, v *semver.Version) { if v == nil { // if recovered but version was never set by cluster return @@ -69,7 +70,15 @@ func UpdateCapability(v *semver.Version) { curVersion = v enabledMap = capabilityMaps[curVersion.String()] enableMapMu.Unlock() - plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) + + if lg != nil { + lg.Info( + "enabled capabilities for version", + zap.String("cluster-version", version.Cluster(v.String())), + ) + } else { + plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) + } } func IsCapabilityEnabled(c Capability) bool { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go index 75da52fb..c46418db 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go @@ -22,7 +22,7 @@ import ( type header struct { clusterID int64 memberID int64 - raftTimer etcdserver.RaftTimer + sg etcdserver.RaftStatusGetter rev func() int64 } @@ -30,7 +30,7 @@ func newHeader(s *etcdserver.EtcdServer) header { return header{ clusterID: int64(s.Cluster().ID()), memberID: int64(s.ID()), - raftTimer: s, + sg: s, rev: func() int64 { return s.KV().Rev() }, } } @@ -42,7 +42,7 @@ func (h *header) fill(rh *pb.ResponseHeader) { } rh.ClusterId = uint64(h.clusterID) rh.MemberId = uint64(h.memberID) - rh.RaftTerm = h.raftTimer.Term() + rh.RaftTerm = h.sg.Term() if rh.Revision == 0 { rh.Revision = h.rev() } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index f872e860..5296214f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -21,16 +21,19 @@ import ( "github.com/coreos/etcd/etcdserver" 
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/lease" + "github.com/coreos/etcd/lease" + + "go.uber.org/zap" ) type LeaseServer struct { + lg *zap.Logger hdr header le etcdserver.Lessor } func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return &LeaseServer{le: s, hdr: newHeader(s)} + return &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)} } func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { @@ -108,9 +111,17 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro } if err != nil { if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } } return err } @@ -138,9 +149,17 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro err = stream.Send(resp) if err != nil { if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + 
if ls.lg != nil { + ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } } return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index aa5e90fd..7f51f4f8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -19,15 +19,16 @@ import ( "crypto/sha256" "io" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/mvcc" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/version" - "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/version" + + "go.uber.org/zap" ) type KVGetter interface { @@ -49,19 +50,14 @@ type LeaderTransferrer interface { MoveLeader(ctx context.Context, lead, target uint64) error } -type RaftStatusGetter interface { - etcdserver.RaftTimer - ID() types.ID - Leader() types.ID -} - type AuthGetter interface { AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } type maintenanceServer struct { - rg RaftStatusGetter + lg *zap.Logger + rg etcdserver.RaftStatusGetter kg KVGetter bg BackendGetter a Alarmer @@ -70,18 +66,30 @@ type maintenanceServer struct { } func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)} + srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)} return &authMaintenanceServer{srv, s} } 
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - plog.Noticef("starting to defragment the storage backend...") + if ms.lg != nil { + ms.lg.Info("starting defragment") + } else { + plog.Noticef("starting to defragment the storage backend...") + } err := ms.bg.Backend().Defrag() if err != nil { - plog.Errorf("failed to defragment the storage backend (%v)", err) + if ms.lg != nil { + ms.lg.Warn("failed to defragment", zap.Error(err)) + } else { + plog.Errorf("failed to defragment the storage backend (%v)", err) + } return nil, err } - plog.Noticef("finished defragmenting the storage backend") + if ms.lg != nil { + ms.lg.Info("finished defragment") + } else { + plog.Noticef("finished defragmenting the storage backend") + } return &pb.DefragmentResponse{}, nil } @@ -94,7 +102,11 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance go func() { snap.WriteTo(pw) if err := snap.Close(); err != nil { - plog.Errorf("error closing snapshot (%v)", err) + if ms.lg != nil { + ms.lg.Warn("failed to close snapshot", zap.Error(err)) + } else { + plog.Errorf("error closing snapshot (%v)", err) + } } pw.Close() }() @@ -156,25 +168,24 @@ func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*p } func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + hdr := &pb.ResponseHeader{} + ms.hdr.fill(hdr) resp := &pb.StatusResponse{ - Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, + Header: hdr, Version: version.Version, - DbSize: ms.bg.Backend().Size(), Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.Index(), - RaftTerm: ms.rg.Term(), + RaftIndex: ms.rg.CommittedIndex(), RaftAppliedIndex: ms.rg.AppliedIndex(), + RaftTerm: ms.rg.Term(), + DbSize: ms.bg.Backend().Size(), + DbSizeInUse: ms.bg.Backend().SizeInUse(), } - if uint64(ms.rg.Leader()) == raft.None { + if resp.Leader == raft.None { resp.Errors = 
append(resp.Errors, etcdserver.ErrNoLeader.Error()) } - alarms := ms.a.Alarms() - if len(alarms) > 0 { - for _, alarm := range alarms { - resp.Errors = append(resp.Errors, alarm.String()) - } + for _, a := range ms.a.Alarms() { + resp.Errors = append(resp.Errors, a.String()) } - ms.hdr.fill(resp.Header) return resp, nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go index 02d99609..4d78d3e7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go @@ -52,7 +52,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()}, } } @@ -85,6 +85,6 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()}, } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 446e4f6b..55eab38e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -31,8 +31,9 @@ var ( ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() - ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() - ErrGRPCLeaseExist = 
status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() + ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() + ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() @@ -80,8 +81,9 @@ var ( ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, - ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, - ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, + ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, + ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge, ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, @@ -131,8 +133,9 @@ var ( ErrFutureRev = Error(ErrGRPCFutureRev) ErrNoSpace = Error(ErrGRPCNoSpace) - ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) - ErrLeaseExist = Error(ErrGRPCLeaseExist) + ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) + ErrLeaseExist = Error(ErrGRPCLeaseExist) + ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge) ErrMemberExist = Error(ErrGRPCMemberExist) ErrPeerURLExist = Error(ErrGRPCPeerURLExist) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go new file mode 100644 index 00000000..8f8ac60f --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go @@ -0,0 +1,20 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpctypes + +var ( + TokenFieldNameGRPC = "token" + TokenFieldNameSwagger = "authorization" +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 5ad962a6..799c1197 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -18,12 +18,12 @@ import ( "context" "strings" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -52,8 +52,9 @@ var toGRPCErrorMap = map[error]error{ etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, - lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, - lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, + lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, + lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, + lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge, auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist, auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist, diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go 
b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index 65ad02d1..5a1f621c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -17,33 +17,39 @@ package v3rpc import ( "context" "io" + "math/rand" "sync" "time" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/mvcc" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/mvccpb" + + "go.uber.org/zap" ) type watchServer struct { clusterID int64 memberID int64 - raftTimer etcdserver.RaftTimer + sg etcdserver.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter + + lg *zap.Logger } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { return &watchServer{ clusterID: int64(s.Cluster().ID()), memberID: int64(s.ID()), - raftTimer: s, + sg: s, watchable: s.Watchable(), ag: s, + lg: s.Cfg.Logger, } } @@ -57,8 +63,15 @@ var ( func GetProgressReportInterval() time.Duration { progressReportIntervalMu.RLock() - defer progressReportIntervalMu.RUnlock() - return progressReportInterval + interval := progressReportInterval + progressReportIntervalMu.RUnlock() + + // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not + // send progress notifications to watchers around the same time even when watchers + // are created around the same time (which is common when a client restarts itself). 
+ jitter := time.Duration(rand.Int63n(int64(interval) / 10)) + + return interval + jitter } func SetProgressReportInterval(newTimeout time.Duration) { @@ -83,7 +96,7 @@ const ( type serverWatchStream struct { clusterID int64 memberID int64 - raftTimer etcdserver.RaftTimer + sg etcdserver.RaftStatusGetter watchable mvcc.WatchableKV @@ -106,13 +119,15 @@ type serverWatchStream struct { wg sync.WaitGroup ag AuthGetter + + lg *zap.Logger } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { sws := serverWatchStream{ clusterID: ws.clusterID, memberID: ws.memberID, - raftTimer: ws.raftTimer, + sg: ws.sg, watchable: ws.watchable, @@ -125,6 +140,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { closec: make(chan struct{}), ag: ws.ag, + + lg: ws.lg, } sws.wg.Add(1) @@ -141,9 +158,17 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { go func() { if rerr := sws.recvLoop(); rerr != nil { if isClientCtxErr(stream.Context().Err(), rerr) { - plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr)) + } else { + plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } } else { - plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } } errc <- rerr } @@ -347,9 +372,17 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send watch response to gRPC stream", 
zap.Error(err)) + } else { + plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error()) + } } return } @@ -368,9 +401,17 @@ func (sws *serverWatchStream) sendLoop() { if err := sws.gRPCStream.Send(c); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) + } } return } @@ -388,9 +429,17 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send pending watch response to gRPC stream (%q)", 
err.Error()) + } } return } @@ -423,7 +472,7 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { ClusterId: uint64(sws.clusterID), MemberId: uint64(sws.memberID), Revision: rev, - RaftTerm: sws.raftTimer.Term(), + RaftTerm: sws.sg.Term(), } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go index 93f5ed04..63379dec 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -17,17 +17,19 @@ package etcdserver import ( "bytes" "context" + "fmt" "sort" "time" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/types" "github.com/gogo/protobuf/proto" + "go.uber.org/zap" ) const ( @@ -107,6 +109,8 @@ func (s *EtcdServer) newApplierV3() applierV3 { } func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { + defer warnOfExpensiveRequest(a.s.getLogger(), time.Now(), &pb.InternalRaftStringer{Request: r}) + ar := &applyResult{} // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls @@ -501,25 +505,39 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat if !txnPath[0] { reqs = rt.Failure } + + lg := a.s.getLogger() for i, req := range reqs { respi := tresp.Responses[i].Response switch tv := req.Request.(type) { case *pb.RequestOp_RequestRange: resp, err := a.Range(txn, tv.RequestRange) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } 
respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp case *pb.RequestOp_RequestPut: resp, err := a.Put(txn, tv.RequestPut) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp case *pb.RequestOp_RequestDeleteRange: resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp case *pb.RequestOp_RequestTxn: @@ -567,6 +585,7 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) resp := &pb.AlarmResponse{} oldCount := len(a.s.alarmStore.Get(ar.Alarm)) + lg := a.s.getLogger() switch ar.Action { case pb.AlarmRequest_GET: resp.Alarms = a.s.alarmStore.Get(ar.Alarm) @@ -581,14 +600,22 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) break } - plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID)) + if lg != nil { + lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + } else { + plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID)) + } switch m.Alarm { case pb.AlarmType_CORRUPT: a.s.applyV3 = newApplierV3Corrupt(a) case pb.AlarmType_NOSPACE: a.s.applyV3 = newApplierV3Capped(a) default: - plog.Errorf("unimplemented alarm activation (%+v)", m) + if lg != nil { + lg.Warn("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m))) + } else { + plog.Errorf("unimplemented alarm activation (%+v)", m) + } } case pb.AlarmRequest_DEACTIVATE: m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) @@ 
-604,10 +631,18 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) switch m.Alarm { case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT: // TODO: check kv hash before deactivating CORRUPT? - plog.Infof("alarm disarmed %+v", ar) + if lg != nil { + lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + } else { + plog.Infof("alarm disarmed %+v", ar) + } a.s.applyV3 = a.s.newApplierV3() default: - plog.Errorf("unimplemented alarm deactivation (%+v)", m) + if lg != nil { + lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m))) + } else { + plog.Errorf("unimplemented alarm deactivation (%+v)", m) + } } default: return nil, nil @@ -771,7 +806,7 @@ type quotaApplierV3 struct { } func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return "aApplierV3{app, NewBackendQuota(s)} + return "aApplierV3{app, NewBackendQuota(s, "v3-applier")} } func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go index 9f3516ee..ec939143 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -17,10 +17,10 @@ package etcdserver import ( "sync" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go index 4607d09d..1a710e56 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go @@ -21,9 +21,11 @@ 
import ( "github.com/coreos/etcd/etcdserver/api" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/internal/store" + "github.com/coreos/etcd/etcdserver/v2store" "github.com/coreos/etcd/pkg/pbutil" + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" ) // ApplierV2 is the interface for processing V2 raft messages @@ -35,12 +37,13 @@ type ApplierV2 interface { Sync(r *RequestV2) Response } -func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { +func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 { return &applierV2store{store: s, cluster: c} } type applierV2store struct { - store store.Store + lg *zap.Logger + store v2store.Store cluster *membership.RaftCluster } @@ -76,7 +79,11 @@ func (a *applierV2store) Put(r *RequestV2) Response { id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) var attr membership.Attributes if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + if a.lg != nil { + a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err)) + } else { + plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + } } if a.cluster != nil { a.cluster.UpdateAttributes(id, attr) @@ -104,9 +111,11 @@ func (a *applierV2store) Sync(r *RequestV2) Response { return Response{} } -// applyV2Request interprets r as a call to store.X and returns a Response interpreted -// from store.Event +// applyV2Request interprets r as a call to v2store.X +// and returns a Response interpreted from v2store.Event func (s *EtcdServer) applyV2Request(r *RequestV2) Response { + defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r) + switch r.Method { case "POST": return s.applyV2.Post(r) @@ -124,15 +133,15 @@ func (s *EtcdServer) applyV2Request(r *RequestV2) Response { } } -func (r *RequestV2) TTLOptions() store.TTLOptionSet { +func (r *RequestV2) TTLOptions() v2store.TTLOptionSet { refresh, _ := 
pbutil.GetBool(r.Refresh) - ttlOptions := store.TTLOptionSet{Refresh: refresh} + ttlOptions := v2store.TTLOptionSet{Refresh: refresh} if r.Expiration != 0 { ttlOptions.ExpireTime = time.Unix(0, r.Expiration) } return ttlOptions } -func toResponse(ev *store.Event, err error) Response { +func toResponse(ev *v2store.Event, err error) Response { return Response{Event: ev, Err: err} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go index f6af4cbb..916139f0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/backend.go +++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -19,16 +19,19 @@ import ( "os" "time" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/raftsnap" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/raftsnap" + + "go.uber.org/zap" ) func newBackend(cfg ServerConfig) backend.Backend { bcfg := backend.DefaultBackendConfig() bcfg.Path = cfg.backendPath() + bcfg.Logger = cfg.Logger if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { // permit 10% excess over quota for disarm bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) @@ -40,10 +43,10 @@ func newBackend(cfg ServerConfig) backend.Backend { func openSnapshotBackend(cfg ServerConfig, ss *raftsnap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) if err != nil { - return nil, fmt.Errorf("database snapshot file path error: %v", err) + return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) } if err := os.Rename(snapPath, cfg.backendPath()); err != nil { - return nil, fmt.Errorf("rename snapshot file error: %v", err) + return nil, fmt.Errorf("failed to rename 
database snapshot file (%v)", err) } return openBackend(cfg), nil } @@ -51,17 +54,32 @@ func openSnapshotBackend(cfg ServerConfig, ss *raftsnap.Snapshotter, snapshot ra // openBackend returns a backend using the current etcd db. func openBackend(cfg ServerConfig) backend.Backend { fn := cfg.backendPath() - beOpened := make(chan backend.Backend) + + now, beOpened := time.Now(), make(chan backend.Backend) go func() { beOpened <- newBackend(cfg) }() + select { case be := <-beOpened: + if cfg.Logger != nil { + cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now))) + } return be + case <-time.After(10 * time.Second): - plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn) - plog.Warningf("waiting for it to exit before starting...") + if cfg.Logger != nil { + cfg.Logger.Info( + "db file is flocked by another process, or taking too long", + zap.String("path", fn), + zap.Duration("took", time.Since(now)), + ) + } else { + plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn) + plog.Warningf("waiting for it to exit before starting...") + } } + return <-beOpened } @@ -71,11 +89,11 @@ func openBackend(cfg ServerConfig) backend.Backend { // case, replace the db with the snapshot db sent by the leader. 
func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { var cIndex consistentIndex - kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex) defer kv.Close() if snapshot.Metadata.Index <= kv.ConsistentIndex() { return oldbe, nil } oldbe.Close() - return openSnapshotBackend(cfg, raftsnap.New(cfg.SnapDir()), snapshot) + return openSnapshotBackend(cfg, raftsnap.New(cfg.Logger, cfg.SnapDir()), snapshot) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go index bcb9f426..d639ab08 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,16 +23,17 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/internal/version" "github.com/coreos/etcd/pkg/types" + "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" + "go.uber.org/zap" ) // isMemberBootstrapped tries to check if the given member has been bootstrapped // in the given cluster. -func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) +func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { + rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt) if err != nil { return false } @@ -54,21 +55,26 @@ func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.Rou // response, an error is returned. // Each request has a 10-second timeout. Because the upper limit of TTL is 5s, // 10 second is enough for building connection and finishing request. 
-func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) +func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { + return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt) } // If logerr is true, it prints out more error messages. -func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { +func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { cc := &http.Client{ Transport: rt, Timeout: timeout, } for _, u := range urls { - resp, err := cc.Get(u + "/members") + addr := u + "/members" + resp, err := cc.Get(addr) if err != nil { if logerr { - plog.Warningf("could not get cluster response from %s: %v", u, err) + if lg != nil { + lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not get cluster response from %s: %v", u, err) + } } continue } @@ -76,21 +82,38 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool resp.Body.Close() if err != nil { if logerr { - plog.Warningf("could not read the body of cluster response: %v", err) + if lg != nil { + lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not read the body of cluster response: %v", err) + } } continue } var membs []*membership.Member if err = json.Unmarshal(b, &membs); err != nil { if logerr { - plog.Warningf("could not unmarshal cluster response: %v", err) + if lg != nil { + lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not unmarshal cluster response: %v", err) + } } continue } id, err := 
types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) if err != nil { if logerr { - plog.Warningf("could not parse the cluster ID from cluster res: %v", err) + if lg != nil { + lg.Warn( + "failed to parse cluster ID", + zap.String("address", addr), + zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")), + zap.Error(err), + ) + } else { + plog.Warningf("could not parse the cluster ID from cluster res: %v", err) + } } continue } @@ -100,12 +123,11 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool // if membership members are not present then the raft cluster formed will be // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error if len(membs) > 0 { - return membership.NewClusterFromMembers("", id, membs), nil + return membership.NewClusterFromMembers(lg, "", id, membs), nil } - - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") + return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs") } - return nil, fmt.Errorf("could not retrieve cluster information from the given urls") + return nil, fmt.Errorf("could not retrieve cluster information from the given URLs") } // getRemotePeerURLs returns peer urls of remote members in the cluster. The @@ -126,7 +148,7 @@ func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { // The key of the returned map is the member's ID. The value of the returned map // is the semver versions string, including server and cluster. // If it fails to get the version of a member, the key will be nil. 
-func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { +func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { members := cl.Members() vers := make(map[string]*version.Versions) for _, m := range members { @@ -138,9 +160,13 @@ func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTrippe vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} continue } - ver, err := getVersion(m, rt) + ver, err := getVersion(lg, m, rt) if err != nil { - plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) + if lg != nil { + lg.Warn("failed to get version", zap.String("remote-peer-id", m.ID.String()), zap.Error(err)) + } else { + plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) + } vers[m.ID.String()] = nil } else { vers[m.ID.String()] = ver @@ -152,7 +178,7 @@ func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTrippe // decideClusterVersion decides the cluster version based on the versions map. // The returned version is the min server version in the map, or nil if the min // version in unknown. 
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { +func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version { var cv *semver.Version lv := semver.Must(semver.NewVersion(version.Version)) @@ -162,12 +188,30 @@ func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { } v, err := semver.NewVersion(ver.Server) if err != nil { - plog.Errorf("cannot understand the version of member %s (%v)", mid, err) + if lg != nil { + lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-peer-id", mid), + zap.String("remote-peer-version", ver.Server), + zap.Error(err), + ) + } else { + plog.Errorf("cannot understand the version of member %s (%v)", mid, err) + } return nil } if lv.LessThan(*v) { - plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) - plog.Warningf("member %s has a higher version %s", mid, ver.Server) + if lg != nil { + lg.Warn( + "local etcd version is not up-to-date", + zap.String("local-member-version", lv.String()), + zap.String("remote-peer-id", mid), + zap.String("remote-peer-version", ver.Server), + ) + } else { + plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) + plog.Warningf("member %s has a higher version %s", mid, ver.Server) + } } if cv == nil { cv = v @@ -184,19 +228,18 @@ func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { // cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version // out of the range. // We set this rule since when the local member joins, another member might be offline. 
-func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(cl, local, rt) +func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + vers := getVersions(lg, cl, local, rt) minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) maxV := semver.Must(semver.NewVersion(version.Version)) maxV = &semver.Version{ Major: maxV.Major, Minor: maxV.Minor, } - - return isCompatibleWithVers(vers, local, minV, maxV) + return isCompatibleWithVers(lg, vers, local, minV, maxV) } -func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { +func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { var ok bool for id, v := range vers { // ignore comparison with local version @@ -208,15 +251,42 @@ func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, min } clusterv, err := semver.NewVersion(v.Cluster) if err != nil { - plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) + if lg != nil { + lg.Warn( + "failed to parse cluster version of remote member", + zap.String("remote-peer-id", id), + zap.String("remote-peer-cluster-version", v.Cluster), + zap.Error(err), + ) + } else { + plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) + } continue } if clusterv.LessThan(*minV) { - plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) + if lg != nil { + lg.Warn( + "cluster version of remote member is not compatible; too low", + zap.String("remote-peer-id", id), + zap.String("remote-peer-cluster-version", clusterv.String()), + zap.String("minimum-cluster-version-supported", minV.String()), + ) + } else { + plog.Warningf("the running cluster version(%v) is lower than the minimal 
cluster version(%v) supported", clusterv.String(), minV.String()) + } return false } if maxV.LessThan(*clusterv) { - plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) + if lg != nil { + lg.Warn( + "cluster version of remote member is not compatible; too high", + zap.String("remote-peer-id", id), + zap.String("remote-peer-cluster-version", clusterv.String()), + zap.String("minimum-cluster-version-supported", minV.String()), + ) + } else { + plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) + } return false } ok = true @@ -226,7 +296,7 @@ func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, min // getVersion returns the Versions of the given member via its // peerURLs. Returns the last error if it fails to get the version. -func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { +func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { cc := &http.Client{ Transport: rt, } @@ -236,21 +306,49 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, ) for _, u := range m.PeerURLs { - resp, err = cc.Get(u + "/version") + addr := u + "/version" + resp, err = cc.Get(addr) if err != nil { - plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) + if lg != nil { + lg.Warn( + "failed to reach the peer URL", + zap.String("address", addr), + zap.String("remote-peer-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) + } continue } var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { - plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) + if lg != nil { + lg.Warn( + "failed to read 
body of response", + zap.String("address", addr), + zap.String("remote-peer-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) + } continue } var vers version.Versions if err = json.Unmarshal(b, &vers); err != nil { - plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) + if lg != nil { + lg.Warn( + "failed to unmarshal response", + zap.String("address", addr), + zap.String("remote-peer-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) + } continue } return &vers, nil diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index 056af745..767b6c3a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -25,6 +25,9 @@ import ( "github.com/coreos/etcd/pkg/netutil" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) // ServerConfig holds the configuration of etcd as taken from the command line or discovery. @@ -44,11 +47,47 @@ type ServerConfig struct { InitialPeerURLsMap types.URLsMap InitialClusterToken string NewCluster bool - ForceNewCluster bool PeerTLSInfo transport.TLSInfo - TickMs uint - ElectionTicks int + CORS map[string]struct{} + + // HostWhitelist lists acceptable hostnames from client requests. + // If server is insecure (no TLS), server only accepts requests + // whose Host header value exists in this white list. + HostWhitelist map[string]struct{} + + TickMs uint + ElectionTicks int + + // InitialElectionTickAdvance is true, then local member fast-forwards + // election ticks to speed up "initial" leader election trigger. 
This + // benefits the case of larger election ticks. For instance, cross + // datacenter deployment may require longer election timeout of 10-second. + // If true, local node does not need wait up to 10-second. Instead, + // forwards its election ticks to 8-second, and have only 2-second left + // before leader election. + // + // Major assumptions are that: + // - cluster has no active leader thus advancing ticks enables faster + // leader election, or + // - cluster already has an established leader, and rejoining follower + // is likely to receive heartbeats from the leader after tick advance + // and before election timeout. + // + // However, when network from leader to rejoining follower is congested, + // and the follower does not receive leader heartbeat within left election + // ticks, disruptive election has to happen thus affecting cluster + // availabilities. + // + // Disabling this would slow down initial bootstrap process for cross + // datacenter deployments. Make your own tradeoffs by configuring + // --initial-election-tick-advance at the cost of slow initial bootstrap. + // + // If single-node, it advances ticks regardless. + // + // See https://github.com/coreos/etcd/issues/9333 for more detail. + InitialElectionTickAdvance bool + BootstrapTimeout time.Duration AutoCompactionRetention time.Duration @@ -64,14 +103,32 @@ type ServerConfig struct { // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool - AuthToken string + AuthToken string + BcryptCost uint // InitialCorruptCheck is true to check data corruption on boot // before serving any peer/client traffic. InitialCorruptCheck bool CorruptCheckTime time.Duration + // PreVote is true to enable Raft Pre-Vote. + PreVote bool + + // Logger logs server-side operations. + // If not nil, it disables "capnslog" and uses the given logger. + Logger *zap.Logger + + // LoggerConfig is server logger configuration for Raft logger. 
+ // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil". + LoggerConfig *zap.Config + // LoggerCore is "zapcore.Core" for raft logger. + // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil". + LoggerCore zapcore.Core + LoggerWriteSyncer zapcore.WriteSyncer + Debug bool + + ForceNewCluster bool } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -124,7 +181,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error { sort.Strings(apurls) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - ok, err := netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) + ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice()) if ok { return nil } @@ -203,28 +260,64 @@ func (c *ServerConfig) PrintWithInitial() { c.print(true) } func (c *ServerConfig) Print() { c.print(false) } func (c *ServerConfig) print(initial bool) { - plog.Infof("name = %s", c.Name) - if c.ForceNewCluster { - plog.Infof("force new cluster") - } - plog.Infof("data dir = %s", c.DataDir) - plog.Infof("member dir = %s", c.MemberDir()) - if c.DedicatedWALDir != "" { - plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) - } - plog.Infof("heartbeat = %dms", c.TickMs) - plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) - plog.Infof("snapshot count = %d", c.SnapCount) - if len(c.DiscoveryURL) != 0 { - plog.Infof("discovery URL= %s", c.DiscoveryURL) - if len(c.DiscoveryProxy) != 0 { - plog.Infof("discovery proxy = %s", c.DiscoveryProxy) + // TODO: remove this after dropping "capnslog" + if c.Logger == nil { + plog.Infof("name = %s", c.Name) + if c.ForceNewCluster { + plog.Infof("force new cluster") } - } - plog.Infof("advertise client URLs = %s", c.ClientURLs) - if initial { - plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) - plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) + plog.Infof("data dir = %s", c.DataDir) + 
plog.Infof("member dir = %s", c.MemberDir()) + if c.DedicatedWALDir != "" { + plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) + } + plog.Infof("heartbeat = %dms", c.TickMs) + plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) + plog.Infof("snapshot count = %d", c.SnapCount) + if len(c.DiscoveryURL) != 0 { + plog.Infof("discovery URL= %s", c.DiscoveryURL) + if len(c.DiscoveryProxy) != 0 { + plog.Infof("discovery proxy = %s", c.DiscoveryProxy) + } + } + plog.Infof("advertise client URLs = %s", c.ClientURLs) + if initial { + plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) + plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) + } + } else { + state := "new" + if !c.NewCluster { + state = "existing" + } + c.Logger.Info( + "server configuration", + zap.String("name", c.Name), + zap.String("data-dir", c.DataDir), + zap.String("member-dir", c.MemberDir()), + zap.String("dedicated-wal-dir", c.DedicatedWALDir), + zap.Bool("force-new-cluster", c.ForceNewCluster), + zap.Uint("heartbeat-tick-ms", c.TickMs), + zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(c.TickMs)*time.Millisecond)), + zap.Int("election-tick-ms", c.ElectionTicks), + zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond)), + zap.Bool("initial-election-tick-advance", c.InitialElectionTickAdvance), + zap.Uint64("snapshot-count", c.SnapCount), + zap.Strings("advertise-client-urls", c.getACURLs()), + zap.Strings("initial-advertise-peer-urls", c.getAPURLs()), + zap.Bool("initial", initial), + zap.String("initial-cluster", c.InitialPeerURLsMap.String()), + zap.String("initial-cluster-state", state), + zap.String("initial-cluster-token", c.InitialClusterToken), + zap.Bool("pre-vote", c.PreVote), + zap.Bool("initial-corrupt-check", c.InitialCorruptCheck), + zap.String("corrupt-check-time-interval", c.CorruptCheckTime.String()), + zap.String("auto-compaction-mode", c.AutoCompactionMode), + 
zap.Duration("auto-compaction-retention", c.AutoCompactionRetention), + zap.String("auto-compaction-interval", c.AutoCompactionRetention.String()), + zap.String("discovery-url", c.DiscoveryURL), + zap.String("discovery-proxy", c.DiscoveryProxy), + ) } } @@ -250,3 +343,19 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } + +func (c *ServerConfig) getAPURLs() (ss []string) { + ss = make([]string, len(c.PeerURLs)) + for i := range c.PeerURLs { + ss[i] = c.PeerURLs[i].String() + } + return ss +} + +func (c *ServerConfig) getACURLs() (ss []string) { + ss = make([]string, len(c.ClientURLs)) + for i := range c.ClientURLs { + ss[i] = c.ClientURLs[i].String() + } + return ss +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go index acd57a65..d4cd9f2e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go +++ b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go @@ -22,8 +22,10 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/mvcc" + "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/pkg/types" + + "go.uber.org/zap" ) // CheckInitialHashKV compares initial hash values with its peers @@ -34,7 +36,18 @@ func (s *EtcdServer) CheckInitialHashKV() error { return nil } - plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout()) + lg := s.getLogger() + + if lg != nil { + lg.Info( + "starting initial corruption check", + zap.String("local-member-id", s.ID().String()), + zap.Duration("timeout", s.Cfg.ReqTimeout()), + ) + } else { + plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout()) + } + h, rev, crev, err := s.kv.HashByRev(0) if err != nil { return fmt.Errorf("%s failed to fetch 
hash (%v)", s.ID(), err) @@ -44,22 +57,70 @@ func (s *EtcdServer) CheckInitialHashKV() error { for _, p := range peers { if p.resp != nil { peerID := types.ID(p.resp.Header.MemberId) + fields := []zap.Field{ + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", peerID.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Int64("remote-peer-revision", p.resp.Header.Revision), + zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision), + zap.Uint32("remote-peer-hash", p.resp.Hash), + } + if h != p.resp.Hash { if crev == p.resp.CompactRevision { - plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev) + if lg != nil { + lg.Warn("found different hash values from remote peer", fields...) + } else { + plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev) + } mismatch++ } else { - plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev) + if lg != nil { + lg.Warn("found different compact revision values from remote peer", fields...) 
+ } else { + plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev) + } } } + continue } + if p.err != nil { switch p.err { case rpctypes.ErrFutureRev: - plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + if lg != nil { + lg.Warn( + "cannot fetch hash from slow remote peer", + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(err), + ) + } else { + plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + } case rpctypes.ErrCompacted: - plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + if lg != nil { + lg.Warn( + "cannot fetch hash from remote peer; local member is behind", + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(err), + ) + } else { + plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + } } } } @@ -67,7 +128,14 @@ func (s *EtcdServer) CheckInitialHashKV() error { return fmt.Errorf("%s found data inconsistency with peers", s.ID()) } - plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID()) + if lg != nil { + lg.Info( + "initial corruption checking passed; no corruption", + zap.String("local-member-id", s.ID().String()), + ) + } else { + 
plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID()) + } return nil } @@ -76,7 +144,18 @@ func (s *EtcdServer) monitorKVHash() { if t == 0 { return } - plog.Infof("enabled corruption checking with %s interval", t) + + lg := s.getLogger() + if lg != nil { + lg.Info( + "enabled corruption checking", + zap.String("local-member-id", s.ID().String()), + zap.Duration("interval", t), + ) + } else { + plog.Infof("enabled corruption checking with %s interval", t) + } + for { select { case <-s.stopping: @@ -87,15 +166,21 @@ func (s *EtcdServer) monitorKVHash() { continue } if err := s.checkHashKV(); err != nil { - plog.Debugf("check hash kv failed %v", err) + if lg != nil { + lg.Warn("failed to check hash KV", zap.Error(err)) + } else { + plog.Debugf("check hash kv failed %v", err) + } } } } func (s *EtcdServer) checkHashKV() error { + lg := s.getLogger() + h, rev, crev, err := s.kv.HashByRev(0) if err != nil { - plog.Fatalf("failed to hash kv store (%v)", err) + return err } peers := s.getPeerHashKVs(rev) @@ -108,7 +193,6 @@ func (s *EtcdServer) checkHashKV() error { h2, rev2, crev2, err := s.kv.HashByRev(0) if err != nil { - plog.Warningf("failed to hash kv store (%v)", err) return err } @@ -119,7 +203,7 @@ func (s *EtcdServer) checkHashKV() error { } alarmed = true a := &pb.AlarmRequest{ - MemberID: uint64(id), + MemberID: id, Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_CORRUPT, } @@ -129,7 +213,19 @@ func (s *EtcdServer) checkHashKV() error { } if h2 != h && rev2 == rev && crev == crev2 { - plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev) + if lg != nil { + lg.Warn( + "found hash mismatch", + zap.Int64("revision-1", rev), + zap.Int64("compact-revision-1", crev), + zap.Uint32("hash-1", h), + zap.Int64("revision-2", rev2), + zap.Int64("compact-revision-2", crev2), + zap.Uint32("hash-2", h2), + ) + } else { + plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev) + } mismatch(uint64(s.ID())) } 
@@ -141,34 +237,63 @@ func (s *EtcdServer) checkHashKV() error { // leader expects follower's latest revision less than or equal to leader's if p.resp.Header.Revision > rev2 { - plog.Warningf( - "revision %d from member %v, expected at most %d", - p.resp.Header.Revision, - types.ID(id), - rev2) + if lg != nil { + lg.Warn( + "revision from follower must be less than or equal to leader's", + zap.Int64("leader-revision", rev2), + zap.Int64("follower-revision", p.resp.Header.Revision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + plog.Warningf( + "revision %d from member %v, expected at most %d", + p.resp.Header.Revision, + types.ID(id), + rev2) + } mismatch(id) } // leader expects follower's latest compact revision less than or equal to leader's if p.resp.CompactRevision > crev2 { - plog.Warningf( - "compact revision %d from member %v, expected at most %d", - p.resp.CompactRevision, - types.ID(id), - crev2, - ) + if lg != nil { + lg.Warn( + "compact revision from follower must be less than or equal to leader's", + zap.Int64("leader-compact-revision", crev2), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + plog.Warningf( + "compact revision %d from member %v, expected at most %d", + p.resp.CompactRevision, + types.ID(id), + crev2, + ) + } mismatch(id) } // follower's compact revision is leader's old one, then hashes must match if p.resp.CompactRevision == crev && p.resp.Hash != h { - plog.Warningf( - "hash %d at revision %d from member %v, expected hash %d", - p.resp.Hash, - rev, - types.ID(id), - h, - ) + if lg != nil { + lg.Warn( + "same compact revision then hashes must match", + zap.Int64("leader-compact-revision", crev2), + zap.Uint32("leader-hash", h), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.Uint32("follower-hash", p.resp.Hash), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + 
plog.Warningf( + "hash %d at revision %d from member %v, expected hash %d", + p.resp.Hash, + rev, + types.ID(id), + h, + ) + } mismatch(id) } } @@ -176,33 +301,47 @@ func (s *EtcdServer) checkHashKV() error { } type peerHashKVResp struct { + id types.ID + eps []string + resp *clientv3.HashKVResponse err error - eps []string } func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) { // TODO: handle the case when "s.cluster.Members" have not // been populated (e.g. no snapshot to load from disk) mbs := s.cluster.Members() - pURLs := make([][]string, len(mbs)) + pss := make([]peerHashKVResp, len(mbs)) for _, m := range mbs { if m.ID == s.ID() { continue } - pURLs = append(pURLs, m.PeerURLs) + pss = append(pss, peerHashKVResp{id: m.ID, eps: m.PeerURLs}) } - for _, purls := range pURLs { - if len(purls) == 0 { + lg := s.getLogger() + + for _, p := range pss { + if len(p.eps) == 0 { continue } cli, cerr := clientv3.New(clientv3.Config{ DialTimeout: s.Cfg.ReqTimeout(), - Endpoints: purls, + Endpoints: p.eps, }) if cerr != nil { - plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error()) + if lg != nil { + lg.Warn( + "failed to create client to peer URL", + zap.String("local-member-id", s.ID().String()), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(cerr), + ) + } else { + plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), p.eps, cerr.Error()) + } continue } @@ -213,15 +352,25 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) { resp, cerr = cli.HashKV(ctx, c, rev) cancel() if cerr == nil { - resps = append(resps, &peerHashKVResp{resp: resp}) + resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: resp, err: nil}) break } - plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev) + if lg != nil { + lg.Warn( + "failed hash kv request", + 
zap.String("local-member-id", s.ID().String()), + zap.Int64("requested-revision", rev), + zap.String("remote-peer-endpoint", c), + zap.Error(cerr), + ) + } else { + plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev) + } } cli.Close() if respsLen == len(resps) { - resps = append(resps, &peerHashKVResp{err: cerr, eps: purls}) + resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: nil, err: cerr}) } } return resps diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go new file mode 100644 index 00000000..8e1231c2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go @@ -0,0 +1,58 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserverpb + +import "fmt" + +// InternalRaftStringer implements custom proto Stringer: +// redact password, shorten output(TODO). 
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
index 399a23a4..83808e05 100644
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -12,9 +12,9 @@ import (
 
 	_ "github.com/gogo/protobuf/gogoproto"
 
-	mvccpb "github.com/coreos/etcd/internal/mvcc/mvccpb"
+	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
 
-	authpb "github.com/coreos/etcd/internal/auth/authpb"
+	authpb "github.com/coreos/etcd/auth/authpb"
 
 	context "golang.org/x/net/context"
 
@@ -296,7 +296,7 @@ type RangeRequest struct {
 	// greater mod revisions will be filtered away.
MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. + // lesser create revisions will be filtered away. MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` // max_create_revision is the upper bound for returned key create revisions; all keys with // greater create revisions will be filtered away. @@ -2436,11 +2436,11 @@ type StatusResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // version is the cluster protocol version used by the responding member. Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` // leader is the member ID which the responding member believes is the current leader. Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. + // raftIndex is the current raft committed index of the responding member. RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` // raftTerm is the current raft term of the responding member. RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` @@ -2448,6 +2448,8 @@ type StatusResponse struct { RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` // errors contains alarm/health information and status. 
Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` } func (m *StatusResponse) Reset() { *m = StatusResponse{} } @@ -2511,6 +2513,13 @@ func (m *StatusResponse) GetErrors() []string { return nil } +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse + } + return 0 +} + type AuthEnableRequest struct { } @@ -2781,8 +2790,8 @@ func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { type AuthRoleRevokePermissionRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } @@ -2799,18 +2808,18 @@ func (m *AuthRoleRevokePermissionRequest) GetRole() string { return "" } -func (m *AuthRoleRevokePermissionRequest) GetKey() string { +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { if m != nil { return m.Key } - return "" + return nil } -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { if m != nil { return m.RangeEnd } - return "" + return nil } type AuthEnableResponse struct { @@ -3974,11 +3983,15 @@ type MaintenanceClient interface { Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. 
Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) @@ -4089,11 +4102,15 @@ type MaintenanceServer interface { Status(context.Context, *StatusRequest) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. 
Hash(context.Context, *HashRequest) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error @@ -7034,6 +7051,11 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.DbSizeInUse != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + } return i, nil } @@ -8906,6 +8928,9 @@ func (m *StatusResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } return n } @@ -15578,6 +15603,25 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) + } + m.DbSizeInUse = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbSizeInUse |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -16908,7 +16952,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -16918,26 +16962,28 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) 
- if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -16947,20 +16993,22 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.RangeEnd = string(dAtA[iNdEx:postIndex]) + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -18566,237 +18614,239 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3708 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x1b, 0x49, - 0x76, 0x56, 0x93, 0xe2, 0xed, 0xf0, 0x22, 0xba, 0x24, 0xdb, 0x34, 0x6d, 0xcb, 0x72, 0xf9, 0x26, - 0x5f, 0x46, 0xdc, 0xd5, 0x6e, 0xf2, 0xe0, 0x04, 0x8b, 0x95, 0x25, 0xae, 0xa5, 0x95, 0x2c, 0x69, - 0x5b, 0x94, 0x67, 0x02, 0x6c, 0x22, 0xb4, 0xc8, 0x92, 0xd4, 0x11, 0xd9, 0xcd, 0x74, 0x37, 0x69, - 0xc9, 0x59, 0x24, 0xc0, 0x66, 0x13, 0xe4, 0x25, 0x79, 0xc8, 0x02, 0x41, 0x92, 0xd7, 0x20, 0x58, - 0xec, 0x0f, 0x18, 0xe4, 0x2f, 0xe4, 0x2d, 0x01, 0xf2, 0x07, 0x82, 0x49, 0x5e, 0xf2, 0x0b, 0x72, - 0x79, 0x5a, 0xd4, 0xad, 0xbb, 0xfa, 0x46, 0x69, 0x86, 0x33, 0xf3, 0x22, 0x77, 0x9d, 0x3e, 0x75, - 0xce, 0xa9, 0x53, 0x75, 0x2e, 0xf5, 0x35, 0x0d, 0x25, 0x67, 0xd8, 0x5d, 0x19, 0x3a, 0xb6, 0x67, - 0xa3, 0x0a, 0xf1, 0xba, 0x3d, 0x97, 0x38, 0x63, 0xe2, 0x0c, 0x8f, 0x9b, 0x0b, 0xa7, 0xf6, 0xa9, - 0xcd, 0x5e, 0xb4, 0xe8, 0x13, 0xe7, 0x69, 0x62, 0xca, 0xd3, 0x32, 0x2d, 0x8f, 0x38, 0x96, 0xd1, - 0x6f, 0x0d, 0xc6, 0xdd, 0x2e, 0xfb, 0x33, 0x3c, 0x6e, 0x9d, 0x8f, 0x05, 0xcf, 0xe3, 0x30, 0x8f, - 0x31, 0xf2, 0xce, 0xd8, 0x9f, 0xe1, 0x31, 0xfb, 0x47, 0x70, 0xdd, 0x3b, 0xb5, 0xed, 0xd3, 0x3e, - 0x69, 0x19, 0x43, 0xb3, 0x65, 0x58, 0x96, 0xed, 0x19, 0x9e, 0x69, 0x5b, 0x2e, 0x7f, 0x8b, 0xff, - 0x5c, 0x83, 0x9a, 0x4e, 0xdc, 0xa1, 0x6d, 0xb9, 0x64, 0x93, 0x18, 0x3d, 0xe2, 0xa0, 0xfb, 0x00, - 0xdd, 0xfe, 0xc8, 0xf5, 0x88, 0x73, 0x64, 0xf6, 0x1a, 0xda, 0x92, 0xb6, 0x3c, 0xab, 0x97, 0x04, - 0x65, 0xab, 0x87, 0xee, 0x42, 0x69, 0x40, 0x06, 0xc7, 0xfc, 0x6d, 0x86, 0xbd, 0x2d, 0x72, 0xc2, - 0x56, 0x0f, 0x35, 0xa1, 0xe8, 0x90, 0xb1, 0xe9, 0x9a, 0xb6, 0xd5, 0xc8, 0x2e, 0x69, 0xcb, 0x59, - 0xdd, 0x1f, 0xd3, 0x89, 0x8e, 0x71, 0xe2, 0x1d, 
0x79, 0xc4, 0x19, 0x34, 0x66, 0xf9, 0x44, 0x4a, - 0xe8, 0x10, 0x67, 0x80, 0x7f, 0x91, 0x83, 0x8a, 0x6e, 0x58, 0xa7, 0x44, 0x27, 0x7f, 0x34, 0x22, - 0xae, 0x87, 0xea, 0x90, 0x3d, 0x27, 0x97, 0x4c, 0x7d, 0x45, 0xa7, 0x8f, 0x7c, 0xbe, 0x75, 0x4a, - 0x8e, 0x88, 0xc5, 0x15, 0x57, 0xe8, 0x7c, 0xeb, 0x94, 0xb4, 0xad, 0x1e, 0x5a, 0x80, 0x5c, 0xdf, - 0x1c, 0x98, 0x9e, 0xd0, 0xca, 0x07, 0x21, 0x73, 0x66, 0x23, 0xe6, 0xac, 0x03, 0xb8, 0xb6, 0xe3, - 0x1d, 0xd9, 0x4e, 0x8f, 0x38, 0x8d, 0xdc, 0x92, 0xb6, 0x5c, 0x5b, 0x7d, 0xbc, 0xa2, 0x6e, 0xcd, - 0x8a, 0x6a, 0xd0, 0xca, 0x81, 0xed, 0x78, 0x7b, 0x94, 0x57, 0x2f, 0xb9, 0xf2, 0x11, 0xfd, 0x08, - 0xca, 0x4c, 0x88, 0x67, 0x38, 0xa7, 0xc4, 0x6b, 0xe4, 0x99, 0x94, 0x27, 0x57, 0x48, 0xe9, 0x30, - 0x66, 0x9d, 0xa9, 0xe7, 0xcf, 0x08, 0x43, 0xc5, 0x25, 0x8e, 0x69, 0xf4, 0xcd, 0x8f, 0xc6, 0x71, - 0x9f, 0x34, 0x0a, 0x4b, 0xda, 0x72, 0x51, 0x0f, 0xd1, 0xe8, 0xfa, 0xcf, 0xc9, 0xa5, 0x7b, 0x64, - 0x5b, 0xfd, 0xcb, 0x46, 0x91, 0x31, 0x14, 0x29, 0x61, 0xcf, 0xea, 0x5f, 0xb2, 0x4d, 0xb3, 0x47, - 0x96, 0xc7, 0xdf, 0x96, 0xd8, 0xdb, 0x12, 0xa3, 0xb0, 0xd7, 0xcb, 0x50, 0x1f, 0x98, 0xd6, 0xd1, - 0xc0, 0xee, 0x1d, 0xf9, 0x0e, 0x01, 0xe6, 0x90, 0xda, 0xc0, 0xb4, 0xde, 0xd9, 0x3d, 0x5d, 0xba, - 0x85, 0x72, 0x1a, 0x17, 0x61, 0xce, 0xb2, 0xe0, 0x34, 0x2e, 0x54, 0xce, 0x15, 0x98, 0xa7, 0x32, - 0xbb, 0x0e, 0x31, 0x3c, 0x12, 0x30, 0x57, 0x18, 0xf3, 0x8d, 0x81, 0x69, 0xad, 0xb3, 0x37, 0x21, - 0x7e, 0xe3, 0x22, 0xc6, 0x5f, 0x15, 0xfc, 0xc6, 0x45, 0x98, 0x1f, 0xaf, 0x40, 0xc9, 0xf7, 0x39, - 0x2a, 0xc2, 0xec, 0xee, 0xde, 0x6e, 0xbb, 0x3e, 0x83, 0x00, 0xf2, 0x6b, 0x07, 0xeb, 0xed, 0xdd, - 0x8d, 0xba, 0x86, 0xca, 0x50, 0xd8, 0x68, 0xf3, 0x41, 0x06, 0xbf, 0x01, 0x08, 0xbc, 0x8b, 0x0a, - 0x90, 0xdd, 0x6e, 0xff, 0x5e, 0x7d, 0x86, 0xf2, 0xbc, 0x6f, 0xeb, 0x07, 0x5b, 0x7b, 0xbb, 0x75, - 0x8d, 0x4e, 0x5e, 0xd7, 0xdb, 0x6b, 0x9d, 0x76, 0x3d, 0x43, 0x39, 0xde, 0xed, 0x6d, 0xd4, 0xb3, - 0xa8, 0x04, 0xb9, 0xf7, 0x6b, 0x3b, 0x87, 0xed, 0xfa, 0x2c, 0xfe, 0xa5, 0x06, 0x55, 
0xb1, 0x5f, - 0x3c, 0x26, 0xd0, 0xf7, 0x21, 0x7f, 0xc6, 0xe2, 0x82, 0x1d, 0xc5, 0xf2, 0xea, 0xbd, 0xc8, 0xe6, - 0x86, 0x62, 0x47, 0x17, 0xbc, 0x08, 0x43, 0xf6, 0x7c, 0xec, 0x36, 0x32, 0x4b, 0xd9, 0xe5, 0xf2, - 0x6a, 0x7d, 0x85, 0x47, 0xee, 0xca, 0x36, 0xb9, 0x7c, 0x6f, 0xf4, 0x47, 0x44, 0xa7, 0x2f, 0x11, - 0x82, 0xd9, 0x81, 0xed, 0x10, 0x76, 0x62, 0x8b, 0x3a, 0x7b, 0xa6, 0xc7, 0x98, 0x6d, 0x9a, 0x38, - 0xad, 0x7c, 0x80, 0x7f, 0xad, 0x01, 0xec, 0x8f, 0xbc, 0xf4, 0xd0, 0x58, 0x80, 0xdc, 0x98, 0x0a, - 0x16, 0x61, 0xc1, 0x07, 0x2c, 0x26, 0x88, 0xe1, 0x12, 0x3f, 0x26, 0xe8, 0x00, 0xdd, 0x86, 0xc2, - 0xd0, 0x21, 0xe3, 0xa3, 0xf3, 0x31, 0x53, 0x52, 0xd4, 0xf3, 0x74, 0xb8, 0x3d, 0x46, 0x0f, 0xa1, - 0x62, 0x9e, 0x5a, 0xb6, 0x43, 0x8e, 0xb8, 0xac, 0x1c, 0x7b, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, - 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x1d, 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x53, 0xb9, 0xef, 0x79, - 0x60, 0x63, 0x86, 0x4d, 0x8b, 0xbb, 0x50, 0x58, 0x8d, 0x7f, 0x0a, 0x68, 0x83, 0xf4, 0x89, 0x47, - 0xa6, 0xc9, 0x1e, 0x8a, 0x4f, 0xb2, 0xaa, 0x4f, 0xf0, 0xdf, 0x68, 0x30, 0x1f, 0x12, 0x3f, 0xd5, - 0xb2, 0x1a, 0x50, 0xe8, 0x31, 0x61, 0xdc, 0x82, 0xac, 0x2e, 0x87, 0xe8, 0x25, 0x14, 0x85, 0x01, - 0x6e, 0x23, 0x9b, 0x72, 0x68, 0x0a, 0xdc, 0x26, 0x17, 0xff, 0x3a, 0x03, 0x25, 0xb1, 0xd0, 0xbd, - 0x21, 0x5a, 0x83, 0xaa, 0xc3, 0x07, 0x47, 0x6c, 0x3d, 0xc2, 0xa2, 0x66, 0x7a, 0x12, 0xda, 0x9c, - 0xd1, 0x2b, 0x62, 0x0a, 0x23, 0xa3, 0xdf, 0x81, 0xb2, 0x14, 0x31, 0x1c, 0x79, 0xc2, 0xe5, 0x8d, - 0xb0, 0x80, 0xe0, 0xfc, 0x6d, 0xce, 0xe8, 0x20, 0xd8, 0xf7, 0x47, 0x1e, 0xea, 0xc0, 0x82, 0x9c, - 0xcc, 0x57, 0x23, 0xcc, 0xc8, 0x32, 0x29, 0x4b, 0x61, 0x29, 0xf1, 0xad, 0xda, 0x9c, 0xd1, 0x91, - 0x98, 0xaf, 0xbc, 0x54, 0x4d, 0xf2, 0x2e, 0x78, 0xf2, 0x8e, 0x99, 0xd4, 0xb9, 0xb0, 0xe2, 0x26, - 0x75, 0x2e, 0xac, 0x37, 0x25, 0x28, 0x88, 0x11, 0xfe, 0xe7, 0x0c, 0x80, 0xdc, 0x8d, 0xbd, 0x21, - 0xda, 0x80, 0x9a, 0x23, 0x46, 0x21, 0x6f, 0xdd, 0x4d, 0xf4, 0x96, 0xd8, 0xc4, 0x19, 0xbd, 0x2a, - 0x27, 0x71, 0xe3, 0x7e, 
0x00, 0x15, 0x5f, 0x4a, 0xe0, 0xb0, 0x3b, 0x09, 0x0e, 0xf3, 0x25, 0x94, - 0xe5, 0x04, 0xea, 0xb2, 0x4f, 0xe1, 0xa6, 0x3f, 0x3f, 0xc1, 0x67, 0x0f, 0x27, 0xf8, 0xcc, 0x17, - 0x38, 0x2f, 0x25, 0xa8, 0x5e, 0x53, 0x0d, 0x0b, 0xdc, 0x76, 0x27, 0xc1, 0x6d, 0x71, 0xc3, 0xa8, - 0xe3, 0x80, 0xd6, 0x4b, 0x3e, 0xc4, 0xff, 0x9d, 0x85, 0xc2, 0xba, 0x3d, 0x18, 0x1a, 0x0e, 0xdd, - 0x8d, 0xbc, 0x43, 0xdc, 0x51, 0xdf, 0x63, 0xee, 0xaa, 0xad, 0x3e, 0x0a, 0x4b, 0x14, 0x6c, 0xf2, - 0x5f, 0x9d, 0xb1, 0xea, 0x62, 0x0a, 0x9d, 0x2c, 0xca, 0x63, 0xe6, 0x1a, 0x93, 0x45, 0x71, 0x14, - 0x53, 0x64, 0x20, 0x67, 0x83, 0x40, 0x6e, 0x42, 0x61, 0x4c, 0x9c, 0xa0, 0xa4, 0x6f, 0xce, 0xe8, - 0x92, 0x80, 0x9e, 0xc3, 0x5c, 0xb4, 0xbc, 0xe4, 0x04, 0x4f, 0xad, 0x1b, 0xae, 0x46, 0x8f, 0xa0, - 0x12, 0xaa, 0x71, 0x79, 0xc1, 0x57, 0x1e, 0x28, 0x25, 0xee, 0x96, 0xcc, 0xab, 0xb4, 0x1e, 0x57, - 0x36, 0x67, 0x64, 0x66, 0xbd, 0x25, 0x33, 0x6b, 0x51, 0xcc, 0x12, 0xb9, 0x35, 0x94, 0x64, 0x7e, - 0x18, 0x4e, 0x32, 0xf8, 0x87, 0x50, 0x0d, 0x39, 0x88, 0xd6, 0x9d, 0xf6, 0x4f, 0x0e, 0xd7, 0x76, - 0x78, 0x91, 0x7a, 0xcb, 0xea, 0x92, 0x5e, 0xd7, 0x68, 0xad, 0xdb, 0x69, 0x1f, 0x1c, 0xd4, 0x33, - 0xa8, 0x0a, 0xa5, 0xdd, 0xbd, 0xce, 0x11, 0xe7, 0xca, 0xe2, 0xb7, 0xbe, 0x04, 0x51, 0xe4, 0x94, - 0xda, 0x36, 0xa3, 0xd4, 0x36, 0x4d, 0xd6, 0xb6, 0x4c, 0x50, 0xdb, 0x58, 0x99, 0xdb, 0x69, 0xaf, - 0x1d, 0xb4, 0xeb, 0xb3, 0x6f, 0x6a, 0x50, 0xe1, 0xfe, 0x3d, 0x1a, 0x59, 0xb4, 0xd4, 0xfe, 0xa3, - 0x06, 0x10, 0x44, 0x13, 0x6a, 0x41, 0xa1, 0xcb, 0xf5, 0x34, 0x34, 0x96, 0x8c, 0x6e, 0x26, 0x6e, - 0x99, 0x2e, 0xb9, 0xd0, 0x77, 0xa1, 0xe0, 0x8e, 0xba, 0x5d, 0xe2, 0xca, 0x92, 0x77, 0x3b, 0x9a, - 0x0f, 0x45, 0xb6, 0xd2, 0x25, 0x1f, 0x9d, 0x72, 0x62, 0x98, 0xfd, 0x11, 0x2b, 0x80, 0x93, 0xa7, - 0x08, 0x3e, 0xfc, 0xf7, 0x1a, 0x94, 0x95, 0xc3, 0xfb, 0x15, 0x93, 0xf0, 0x3d, 0x28, 0x31, 0x1b, - 0x48, 0x4f, 0xa4, 0xe1, 0xa2, 0x1e, 0x10, 0xd0, 0x6f, 0x43, 0x49, 0x46, 0x80, 0xcc, 0xc4, 0x8d, - 0x64, 0xb1, 0x7b, 0x43, 0x3d, 0x60, 0xc5, 0xdb, 0x70, 0x83, 
0x79, 0xa5, 0x4b, 0x9b, 0x6b, 0xe9, - 0x47, 0xb5, 0xfd, 0xd4, 0x22, 0xed, 0x67, 0x13, 0x8a, 0xc3, 0xb3, 0x4b, 0xd7, 0xec, 0x1a, 0x7d, - 0x61, 0x85, 0x3f, 0xc6, 0x3f, 0x06, 0xa4, 0x0a, 0x9b, 0x66, 0xb9, 0xb8, 0x0a, 0xe5, 0x4d, 0xc3, - 0x3d, 0x13, 0x26, 0xe1, 0x97, 0x50, 0xa5, 0xc3, 0xed, 0xf7, 0xd7, 0xb0, 0x91, 0x5d, 0x0e, 0x24, - 0xf7, 0x54, 0x3e, 0x47, 0x30, 0x7b, 0x66, 0xb8, 0x67, 0x6c, 0xa1, 0x55, 0x9d, 0x3d, 0xa3, 0xe7, - 0x50, 0xef, 0xf2, 0x45, 0x1e, 0x45, 0xae, 0x0c, 0x73, 0x82, 0xee, 0x77, 0x82, 0x9f, 0x41, 0x85, - 0xaf, 0xe1, 0xeb, 0x36, 0x02, 0xdf, 0x80, 0xb9, 0x03, 0xcb, 0x18, 0xba, 0x67, 0xb6, 0xac, 0x6e, - 0x74, 0xd1, 0xf5, 0x80, 0x36, 0x95, 0xc6, 0x67, 0x30, 0xe7, 0x90, 0x81, 0x61, 0x5a, 0xa6, 0x75, - 0x7a, 0x74, 0x7c, 0xe9, 0x11, 0x57, 0x5c, 0x98, 0x6a, 0x3e, 0xf9, 0x0d, 0xa5, 0x52, 0xd3, 0x8e, - 0xfb, 0xf6, 0xb1, 0x48, 0x73, 0xec, 0x19, 0x7f, 0xae, 0x41, 0xe5, 0x53, 0xc3, 0xeb, 0xca, 0xad, - 0x43, 0x5b, 0x50, 0xf3, 0x93, 0x1b, 0xa3, 0x08, 0x5b, 0x22, 0x25, 0x96, 0xcd, 0x91, 0xad, 0xb4, - 0xac, 0x8e, 0xd5, 0xae, 0x4a, 0x60, 0xa2, 0x0c, 0xab, 0x4b, 0xfa, 0xbe, 0xa8, 0x4c, 0xba, 0x28, - 0xc6, 0xa8, 0x8a, 0x52, 0x09, 0x6f, 0xe6, 0x82, 0xf6, 0x83, 0xe7, 0x92, 0xcf, 0x33, 0x80, 0xe2, - 0x36, 0x7c, 0xd9, 0x8e, 0xec, 0x09, 0xd4, 0x5c, 0xcf, 0x70, 0x62, 0x67, 0xa3, 0xca, 0xa8, 0x7e, - 0x82, 0x7e, 0x06, 0x73, 0x43, 0xc7, 0x3e, 0x75, 0x88, 0xeb, 0x1e, 0x59, 0xb6, 0x67, 0x9e, 0x5c, - 0x8a, 0xa6, 0xb6, 0x26, 0xc9, 0xbb, 0x8c, 0x8a, 0xda, 0x50, 0x38, 0x31, 0xfb, 0x1e, 0x71, 0xdc, - 0x46, 0x6e, 0x29, 0xbb, 0x5c, 0x5b, 0x7d, 0x79, 0x95, 0xd7, 0x56, 0x7e, 0xc4, 0xf8, 0x3b, 0x97, - 0x43, 0xa2, 0xcb, 0xb9, 0x6a, 0xa3, 0x98, 0x0f, 0x35, 0xcf, 0x77, 0xa0, 0xf8, 0x81, 0x8a, 0xa0, - 0x97, 0xe2, 0x02, 0xef, 0xed, 0xd8, 0x78, 0xab, 0x87, 0x9f, 0x00, 0x04, 0xa2, 0x68, 0x16, 0xde, - 0xdd, 0xdb, 0x3f, 0xec, 0xd4, 0x67, 0x50, 0x05, 0x8a, 0xbb, 0x7b, 0x1b, 0xed, 0x9d, 0x36, 0x4d, - 0xd9, 0xb8, 0x25, 0xdd, 0xa6, 0xba, 0x37, 0x24, 0x57, 0x0b, 0xcb, 0xfd, 0xab, 0x0c, 0x54, 0xc5, - 
0x01, 0x99, 0xea, 0x94, 0xaa, 0x2a, 0x32, 0x21, 0x15, 0xb4, 0x61, 0xe5, 0x07, 0xa7, 0x27, 0xfa, - 0x62, 0x39, 0xa4, 0x69, 0x83, 0x9f, 0x03, 0xd2, 0x13, 0x1e, 0xf7, 0xc7, 0x89, 0x91, 0x9d, 0x4b, - 0x8c, 0x6c, 0xf4, 0x08, 0xaa, 0xfe, 0x41, 0x34, 0x5c, 0x51, 0x86, 0x4b, 0x7a, 0x45, 0x9e, 0x31, - 0x4a, 0x43, 0x4f, 0x20, 0x4f, 0xc6, 0xc4, 0xf2, 0xdc, 0x46, 0x99, 0x25, 0xe4, 0xaa, 0x6c, 0x8d, - 0xdb, 0x94, 0xaa, 0x8b, 0x97, 0xf8, 0xb7, 0xe0, 0x06, 0xbb, 0x82, 0xbc, 0x75, 0x0c, 0x4b, 0xbd, - 0x2b, 0x75, 0x3a, 0x3b, 0xc2, 0x75, 0xf4, 0x11, 0xd5, 0x20, 0xb3, 0xb5, 0x21, 0x16, 0x9a, 0xd9, - 0xda, 0xc0, 0x3f, 0xd7, 0x00, 0xa9, 0xf3, 0xa6, 0xf2, 0x65, 0x44, 0xb8, 0x54, 0x9f, 0x0d, 0xd4, - 0x2f, 0x40, 0x8e, 0x38, 0x8e, 0xed, 0x30, 0xaf, 0x95, 0x74, 0x3e, 0xc0, 0x8f, 0x85, 0x0d, 0x3a, - 0x19, 0xdb, 0xe7, 0x7e, 0xcc, 0x70, 0x69, 0x9a, 0x6f, 0xea, 0x36, 0xcc, 0x87, 0xb8, 0xa6, 0x2a, - 0x0c, 0xcf, 0xe0, 0x26, 0x13, 0xb6, 0x4d, 0xc8, 0x70, 0xad, 0x6f, 0x8e, 0x53, 0xb5, 0x0e, 0xe1, - 0x56, 0x94, 0xf1, 0x9b, 0xf5, 0x11, 0xfe, 0x5d, 0xa1, 0xb1, 0x63, 0x0e, 0x48, 0xc7, 0xde, 0x49, - 0xb7, 0x8d, 0x26, 0xce, 0x73, 0x72, 0xe9, 0x8a, 0x0a, 0xca, 0x9e, 0xf1, 0x3f, 0x69, 0x70, 0x3b, - 0x36, 0xfd, 0x1b, 0xde, 0xd5, 0x45, 0x80, 0x53, 0x7a, 0x7c, 0x48, 0x8f, 0xbe, 0xe0, 0x97, 0x77, - 0x85, 0xe2, 0xdb, 0x49, 0x73, 0x4f, 0x45, 0xd8, 0xb9, 0x20, 0xf6, 0x9c, 0xfd, 0x71, 0x65, 0xf9, - 0xb9, 0x0f, 0x65, 0x46, 0x38, 0xf0, 0x0c, 0x6f, 0xe4, 0xc6, 0x36, 0xe3, 0x4f, 0xc4, 0x11, 0x90, - 0x93, 0xa6, 0x5a, 0xd7, 0x77, 0x21, 0xcf, 0xfa, 0x56, 0xd9, 0xb5, 0x45, 0x2e, 0x0a, 0x8a, 0x1d, - 0xba, 0x60, 0xc4, 0x67, 0x90, 0x7f, 0xc7, 0xc0, 0x3e, 0xc5, 0xb2, 0x59, 0xb9, 0x15, 0x96, 0x31, - 0xe0, 0x10, 0x44, 0x49, 0x67, 0xcf, 0xac, 0xc9, 0x21, 0xc4, 0x39, 0xd4, 0x77, 0x78, 0x33, 0x55, - 0xd2, 0xfd, 0x31, 0x75, 0x59, 0xb7, 0x6f, 0x12, 0xcb, 0x63, 0x6f, 0x67, 0xd9, 0x5b, 0x85, 0x82, - 0x57, 0xa0, 0xce, 0x35, 0xad, 0xf5, 0x7a, 0x4a, 0xb3, 0xe2, 0xcb, 0xd3, 0xc2, 0xf2, 0xf0, 0xaf, - 0x34, 0xb8, 0xa1, 0x4c, 0x98, 0xca, 
0x31, 0xaf, 0x20, 0xcf, 0x21, 0x4d, 0x51, 0x17, 0x17, 0xc2, - 0xb3, 0xb8, 0x1a, 0x5d, 0xf0, 0xa0, 0x15, 0x28, 0xf0, 0x27, 0xd9, 0x31, 0x26, 0xb3, 0x4b, 0x26, - 0xfc, 0x04, 0xe6, 0x05, 0x89, 0x0c, 0xec, 0xa4, 0xb3, 0xcd, 0x1c, 0x8a, 0x7f, 0x06, 0x0b, 0x61, - 0xb6, 0xa9, 0x96, 0xa4, 0x18, 0x99, 0xb9, 0x8e, 0x91, 0x6b, 0xd2, 0xc8, 0xc3, 0x61, 0x4f, 0x29, - 0xe3, 0xd1, 0x5d, 0x57, 0x77, 0x24, 0x13, 0xd9, 0x11, 0x7f, 0x01, 0x52, 0xc4, 0xb7, 0xba, 0x80, - 0x79, 0x79, 0x1c, 0x76, 0x4c, 0xd7, 0x6f, 0xee, 0x3e, 0x02, 0x52, 0x89, 0xdf, 0xb6, 0x41, 0x1b, - 0xe4, 0xc4, 0x31, 0x4e, 0x07, 0xc4, 0xaf, 0x4f, 0xb4, 0xd5, 0x57, 0x89, 0x53, 0x65, 0xf4, 0x16, - 0xdc, 0x78, 0x67, 0x8f, 0x69, 0x6a, 0xa0, 0xd4, 0x20, 0x64, 0xf8, 0x55, 0xcf, 0xdf, 0x36, 0x7f, - 0x4c, 0x95, 0xab, 0x13, 0xa6, 0x52, 0xfe, 0xaf, 0x1a, 0x54, 0xd6, 0xfa, 0x86, 0x33, 0x90, 0x8a, - 0x7f, 0x00, 0x79, 0x7e, 0x81, 0x11, 0x98, 0xc1, 0xd3, 0xb0, 0x18, 0x95, 0x97, 0x0f, 0xd6, 0xf8, - 0x75, 0x47, 0xcc, 0xa2, 0x86, 0x8b, 0xcf, 0x0a, 0x1b, 0x91, 0xcf, 0x0c, 0x1b, 0xe8, 0x13, 0xc8, - 0x19, 0x74, 0x0a, 0x4b, 0xc1, 0xb5, 0xe8, 0xd5, 0x91, 0x49, 0x63, 0x7d, 0x1b, 0xe7, 0xc2, 0xdf, - 0x87, 0xb2, 0xa2, 0x81, 0x5e, 0x8e, 0xdf, 0xb6, 0x45, 0x03, 0xb6, 0xb6, 0xde, 0xd9, 0x7a, 0xcf, - 0xef, 0xcc, 0x35, 0x80, 0x8d, 0xb6, 0x3f, 0xce, 0xe0, 0xcf, 0xc4, 0x2c, 0x91, 0xef, 0x54, 0x7b, - 0xb4, 0x34, 0x7b, 0x32, 0xd7, 0xb2, 0xe7, 0x02, 0xaa, 0x62, 0xf9, 0xd3, 0xa6, 0x6f, 0x26, 0x2f, - 0x25, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0xcf, 0x41, 0x55, 0x24, 0x74, 0x71, 0xfe, 0xfe, 0x3a, - 0x03, 0x35, 0x49, 0x99, 0x16, 0xdb, 0x94, 0xb0, 0x0c, 0xaf, 0x00, 0x3e, 0x28, 0x73, 0x0b, 0xf2, - 0xbd, 0xe3, 0x03, 0xf3, 0xa3, 0xc4, 0xa1, 0xc5, 0x88, 0xd2, 0xfb, 0x5c, 0x0f, 0xff, 0x18, 0x24, - 0x46, 0xf4, 0x82, 0xee, 0x18, 0x27, 0xde, 0x96, 0xd5, 0x23, 0x17, 0xac, 0x6f, 0x9c, 0xd5, 0x03, - 0x02, 0xbb, 0xaf, 0x8a, 0x8f, 0x46, 0xac, 0x59, 0x54, 0x3e, 0x22, 0xa1, 0x17, 0x50, 0xa7, 0xcf, - 0x6b, 0xc3, 0x61, 0xdf, 0x24, 0x3d, 0x2e, 0xa0, 0xc0, 0x78, 0x62, 0x74, 
0xaa, 0x9d, 0xb5, 0x5e, - 0x6e, 0xa3, 0xc8, 0xd2, 0x96, 0x18, 0xd1, 0x28, 0x5d, 0x1b, 0x79, 0x67, 0x6d, 0xcb, 0x38, 0xee, - 0xcb, 0xac, 0x47, 0x4b, 0x35, 0x25, 0x6e, 0x98, 0xae, 0x4a, 0x6d, 0xc3, 0x3c, 0xa5, 0x12, 0xcb, - 0x33, 0xbb, 0x4a, 0x8a, 0x94, 0x85, 0x50, 0x8b, 0x14, 0x42, 0xc3, 0x75, 0x3f, 0xd8, 0x4e, 0x4f, - 0xb8, 0xc7, 0x1f, 0xe3, 0x0d, 0x2e, 0xfc, 0xd0, 0x0d, 0x95, 0xba, 0x2f, 0x2b, 0x65, 0x39, 0x90, - 0xf2, 0x96, 0x78, 0x13, 0xa4, 0xe0, 0x97, 0x70, 0x53, 0x72, 0x0a, 0xec, 0x70, 0x02, 0xf3, 0x1e, - 0xdc, 0x97, 0xcc, 0xeb, 0x67, 0xf4, 0x72, 0xb6, 0x2f, 0x14, 0x7e, 0x55, 0x3b, 0xdf, 0x40, 0xc3, - 0xb7, 0x93, 0x35, 0xdc, 0x76, 0x5f, 0x35, 0x60, 0xe4, 0x8a, 0x73, 0x57, 0xd2, 0xd9, 0x33, 0xa5, - 0x39, 0x76, 0xdf, 0x6f, 0x2b, 0xe8, 0x33, 0x5e, 0x87, 0x3b, 0x52, 0x86, 0x68, 0x85, 0xc3, 0x42, - 0x62, 0x06, 0x25, 0x09, 0x11, 0x0e, 0xa3, 0x53, 0x27, 0xbb, 0x5d, 0xe5, 0x0c, 0xbb, 0x96, 0xc9, - 0xd4, 0x14, 0x99, 0x37, 0xf9, 0x89, 0xa0, 0x86, 0xa9, 0x55, 0x47, 0x90, 0xa9, 0x00, 0x95, 0x2c, - 0x36, 0x82, 0x92, 0x63, 0x1b, 0x11, 0x13, 0xfd, 0x53, 0x58, 0xf4, 0x8d, 0xa0, 0x7e, 0xdb, 0x27, - 0xce, 0xc0, 0x74, 0x5d, 0x05, 0x6d, 0x4a, 0x5a, 0xf8, 0x53, 0x98, 0x1d, 0x12, 0x91, 0x97, 0xca, - 0xab, 0x68, 0x85, 0x7f, 0x1e, 0x5e, 0x51, 0x26, 0xb3, 0xf7, 0xb8, 0x07, 0x0f, 0xa4, 0x74, 0xee, - 0xd1, 0x44, 0xf1, 0x51, 0xa3, 0xe4, 0xa5, 0x9e, 0xbb, 0x35, 0x7e, 0xa9, 0xcf, 0xf2, 0xbd, 0xf7, - 0x11, 0xd0, 0x1f, 0x73, 0x47, 0xca, 0xd8, 0x9a, 0xaa, 0xde, 0x6c, 0x73, 0x9f, 0xfa, 0x21, 0x39, - 0x95, 0xb0, 0x63, 0x58, 0x08, 0x47, 0xf2, 0x54, 0xa9, 0x70, 0x01, 0x72, 0x9e, 0x7d, 0x4e, 0x64, - 0x22, 0xe4, 0x03, 0x69, 0xb0, 0x1f, 0xe6, 0x53, 0x19, 0x6c, 0x04, 0xc2, 0xd8, 0x91, 0x9c, 0xd6, - 0x5e, 0xba, 0x9b, 0xb2, 0x81, 0xe3, 0x03, 0xbc, 0x0b, 0xb7, 0xa2, 0x69, 0x62, 0x2a, 0x93, 0xdf, - 0xf3, 0x03, 0x9c, 0x94, 0x49, 0xa6, 0x92, 0xfb, 0x93, 0x20, 0x19, 0x28, 0x09, 0x65, 0x2a, 0x91, - 0x3a, 0x34, 0x93, 0xf2, 0xcb, 0xd7, 0x71, 0x5e, 0xfd, 0x74, 0x33, 0x95, 0x30, 0x37, 0x10, 0x36, - 0xfd, 0xf6, 
0x07, 0x39, 0x22, 0x3b, 0x31, 0x47, 0x88, 0x20, 0x09, 0xb2, 0xd8, 0x37, 0x70, 0xe8, - 0x84, 0x8e, 0x20, 0x81, 0x4e, 0xab, 0x83, 0xd6, 0x10, 0x5f, 0x07, 0x1b, 0xc8, 0x83, 0xad, 0xa6, - 0xdd, 0xa9, 0x36, 0xe3, 0xd3, 0x20, 0x77, 0xc6, 0x32, 0xf3, 0x54, 0x82, 0x3f, 0x83, 0xa5, 0xf4, - 0xa4, 0x3c, 0x8d, 0xe4, 0x17, 0x2d, 0x28, 0xf9, 0x4d, 0xa9, 0xf2, 0xd3, 0x8a, 0x32, 0x14, 0x76, - 0xf7, 0x0e, 0xf6, 0xd7, 0xd6, 0xdb, 0xfc, 0xb7, 0x15, 0xeb, 0x7b, 0xba, 0x7e, 0xb8, 0xdf, 0xa9, - 0x67, 0x56, 0xff, 0x37, 0x0b, 0x99, 0xed, 0xf7, 0xe8, 0xf7, 0x21, 0xc7, 0x3f, 0x34, 0x4e, 0xf8, - 0xba, 0xdc, 0x9c, 0xf4, 0x2d, 0x15, 0xdf, 0xfd, 0xf9, 0xbf, 0xff, 0xd7, 0x2f, 0x33, 0x37, 0x71, - 0xbd, 0x35, 0xfe, 0xde, 0x31, 0xf1, 0x8c, 0xd6, 0xf9, 0xb8, 0xc5, 0xea, 0xc3, 0x6b, 0xed, 0x05, - 0x3a, 0x84, 0xec, 0xfe, 0xc8, 0x43, 0xa9, 0x5f, 0x9e, 0x9b, 0xe9, 0x9f, 0x58, 0xf1, 0x1d, 0x26, - 0x78, 0x1e, 0xd7, 0x14, 0xc1, 0xc3, 0x91, 0x47, 0xc5, 0x8e, 0xa0, 0xac, 0x7e, 0x24, 0xbd, 0xf2, - 0x93, 0x74, 0xf3, 0xea, 0x0f, 0xb0, 0xf8, 0x21, 0x53, 0x77, 0x17, 0xdf, 0x52, 0xd4, 0xf1, 0x4f, - 0xb9, 0xea, 0x6a, 0x3a, 0x17, 0x16, 0x4a, 0xfd, 0x68, 0xdd, 0x4c, 0xff, 0x2e, 0x9b, 0xb8, 0x1a, - 0xef, 0xc2, 0xa2, 0x62, 0x2d, 0xf1, 0x59, 0xb6, 0xeb, 0xa1, 0x07, 0x09, 0x9f, 0xe5, 0xd4, 0x0f, - 0x50, 0xcd, 0xa5, 0x74, 0x06, 0xa1, 0x68, 0x89, 0x29, 0x6a, 0xe2, 0x9b, 0x8a, 0xa2, 0xae, 0xcf, - 0xf6, 0x5a, 0x7b, 0xb1, 0x7a, 0x0a, 0x39, 0x86, 0x32, 0xa3, 0x3f, 0x90, 0x0f, 0xcd, 0x04, 0xe8, - 0x3c, 0x65, 0xf3, 0x43, 0xf8, 0x34, 0x6e, 0x30, 0x65, 0x08, 0x57, 0xa5, 0x32, 0x86, 0x33, 0xbf, - 0xd6, 0x5e, 0x2c, 0x6b, 0xdf, 0xd1, 0x56, 0xff, 0x67, 0x16, 0x72, 0x0c, 0x72, 0x42, 0x36, 0x40, - 0x80, 0xc8, 0x46, 0x57, 0x19, 0xc3, 0x78, 0xa3, 0xab, 0x8c, 0x83, 0xb9, 0x78, 0x91, 0x29, 0x6e, - 0xe0, 0x79, 0xa9, 0x98, 0xa1, 0x59, 0x2d, 0x06, 0xd0, 0x51, 0x9f, 0x8e, 0x05, 0xe8, 0xc6, 0xc3, - 0x0c, 0x25, 0x09, 0x0c, 0x21, 0xb3, 0xd1, 0x13, 0x92, 0x80, 0xca, 0x62, 0xcc, 0x74, 0xde, 0xc3, - 0xb7, 0x15, 0xcf, 0x72, 0xb5, 0x0e, 0x63, 0xa4, 
0x7a, 0xff, 0x4c, 0x83, 0x5a, 0x18, 0x5b, 0x45, - 0x8f, 0x12, 0x24, 0x47, 0x21, 0xda, 0xe6, 0xe3, 0xc9, 0x4c, 0x69, 0x16, 0x70, 0xf5, 0xe7, 0x84, - 0x0c, 0x0d, 0xca, 0x28, 0x1c, 0x8f, 0xfe, 0x42, 0x83, 0xb9, 0x08, 0x60, 0x8a, 0x92, 0x34, 0xc4, - 0xe0, 0xd8, 0xe6, 0x93, 0x2b, 0xb8, 0x84, 0x21, 0x4f, 0x99, 0x21, 0x4b, 0xf8, 0x6e, 0xcc, 0x15, - 0x9e, 0x39, 0x20, 0x9e, 0x2d, 0x8c, 0xf1, 0xb7, 0x81, 0x83, 0x9b, 0x89, 0xdb, 0x10, 0x02, 0x4b, - 0x13, 0xb7, 0x21, 0x8c, 0x8c, 0x4e, 0xd8, 0x06, 0x8e, 0x68, 0xd2, 0x23, 0xfe, 0x7f, 0x59, 0x28, - 0xac, 0xf3, 0x1f, 0x38, 0x22, 0x17, 0x4a, 0x3e, 0x8a, 0x88, 0x16, 0x93, 0x10, 0x9d, 0xe0, 0xb6, - 0xd0, 0x7c, 0x90, 0xfa, 0x5e, 0x68, 0x7f, 0xc2, 0xb4, 0x3f, 0xc0, 0x4d, 0xa9, 0x5d, 0xfc, 0x8e, - 0xb2, 0xc5, 0xa1, 0x83, 0x96, 0xd1, 0xeb, 0xd1, 0x85, 0xff, 0x29, 0x54, 0x54, 0xa8, 0x0f, 0x3d, - 0x4c, 0x44, 0x92, 0x54, 0xb4, 0xb0, 0x89, 0x27, 0xb1, 0x08, 0xed, 0xcb, 0x4c, 0x3b, 0xc6, 0xf7, - 0x53, 0xb4, 0x3b, 0x8c, 0x3d, 0x64, 0x00, 0x87, 0xea, 0x92, 0x0d, 0x08, 0x21, 0x81, 0xc9, 0x06, - 0x84, 0x91, 0xbe, 0x2b, 0x0d, 0x18, 0x31, 0x76, 0x6a, 0xc0, 0x07, 0x80, 0x00, 0x98, 0x43, 0x89, - 0x7e, 0x55, 0xae, 0x4e, 0xd1, 0x90, 0x8f, 0x63, 0x7a, 0xf1, 0x33, 0x17, 0x51, 0xdd, 0x37, 0x5d, - 0x1a, 0xfa, 0xab, 0xbf, 0xca, 0x43, 0xf9, 0x9d, 0x61, 0x5a, 0x1e, 0xb1, 0x0c, 0xab, 0x4b, 0xd0, - 0x09, 0xe4, 0x58, 0x69, 0x8c, 0x66, 0x39, 0x15, 0xaf, 0x8a, 0x66, 0xb9, 0x10, 0x98, 0x83, 0x1f, - 0x33, 0xcd, 0x8b, 0xf8, 0x8e, 0xd4, 0x3c, 0x08, 0xc4, 0xb7, 0x18, 0x0e, 0x43, 0x17, 0xfc, 0x87, - 0x90, 0x17, 0x10, 0x7f, 0x44, 0x58, 0x08, 0x9f, 0x69, 0xde, 0x4b, 0x7e, 0x99, 0x76, 0xbc, 0x54, - 0x55, 0x2e, 0xe3, 0xa5, 0xba, 0x3e, 0x02, 0x04, 0x20, 0x63, 0xd4, 0xb9, 0x31, 0x4c, 0xb2, 0xb9, - 0x94, 0xce, 0x20, 0xf4, 0x3e, 0x67, 0x7a, 0x1f, 0xe1, 0xc5, 0x24, 0xbd, 0x3d, 0x9f, 0x9f, 0xea, - 0x3e, 0x86, 0xd9, 0x4d, 0xc3, 0x3d, 0x43, 0x91, 0x62, 0xa7, 0xfc, 0x26, 0xa1, 0xd9, 0x4c, 0x7a, - 0x25, 0x34, 0x3d, 0x62, 0x9a, 0xee, 0xe3, 0x46, 0x92, 0xa6, 0x33, 0xc3, 0xa5, 0xd5, 
0x03, 0x9d, - 0x41, 0x9e, 0xff, 0x4c, 0x21, 0xea, 0xcb, 0xd0, 0x4f, 0x1d, 0xa2, 0xbe, 0x0c, 0xff, 0xb2, 0xe1, - 0x7a, 0x9a, 0x3c, 0x28, 0xca, 0xdf, 0x06, 0xa0, 0xfb, 0x91, 0xad, 0x09, 0xff, 0x8e, 0xa0, 0xb9, - 0x98, 0xf6, 0x5a, 0xe8, 0x7b, 0xc6, 0xf4, 0x3d, 0xc4, 0xf7, 0x12, 0xf7, 0x4e, 0x70, 0xbf, 0xd6, - 0x5e, 0x7c, 0x47, 0xa3, 0x65, 0x02, 0x02, 0xa0, 0x36, 0x16, 0x1d, 0x51, 0xcc, 0x37, 0x16, 0x1d, - 0x31, 0x8c, 0x17, 0xaf, 0x32, 0xe5, 0xaf, 0xf0, 0xb3, 0x24, 0xe5, 0x9e, 0x63, 0x58, 0xee, 0x09, - 0x71, 0x3e, 0xe1, 0x80, 0x9c, 0x7b, 0x66, 0x0e, 0x69, 0xa4, 0xfc, 0xff, 0x1c, 0xcc, 0xd2, 0x7e, - 0x94, 0x96, 0xe7, 0xe0, 0x1a, 0x1f, 0xb5, 0x26, 0x06, 0x9e, 0x45, 0xad, 0x89, 0x23, 0x00, 0xf1, - 0xf2, 0xcc, 0x7e, 0xca, 0x4e, 0x18, 0x13, 0xf5, 0xba, 0x0b, 0x65, 0xe5, 0xae, 0x8f, 0x12, 0x04, - 0x86, 0x91, 0xb9, 0x68, 0x5d, 0x48, 0x00, 0x0a, 0xf0, 0x03, 0xa6, 0xf3, 0x0e, 0x5e, 0x08, 0xe9, - 0xec, 0x71, 0x2e, 0xaa, 0xf4, 0x8f, 0xa1, 0xa2, 0x62, 0x02, 0x28, 0x41, 0x66, 0x04, 0xf9, 0x8b, - 0xa6, 0xc4, 0x24, 0x48, 0x21, 0x9e, 0x1d, 0xfc, 0x9f, 0xed, 0x4b, 0x56, 0xaa, 0x7c, 0x08, 0x05, - 0x01, 0x14, 0x24, 0xad, 0x36, 0x0c, 0x15, 0x26, 0xad, 0x36, 0x82, 0x32, 0xc4, 0xdb, 0x3c, 0xa6, - 0x95, 0xde, 0x87, 0x64, 0x09, 0x12, 0x1a, 0xdf, 0x12, 0x2f, 0x4d, 0x63, 0x80, 0x7d, 0xa5, 0x69, - 0x54, 0xee, 0xa2, 0x93, 0x34, 0x9e, 0x12, 0x4f, 0xc4, 0x92, 0xbc, 0xe7, 0xa1, 0x14, 0x81, 0x6a, - 0xca, 0xc7, 0x93, 0x58, 0xd2, 0xba, 0xf2, 0x40, 0xa9, 0xc8, 0xf7, 0xe8, 0x67, 0x00, 0x01, 0xa4, - 0x11, 0xed, 0xb6, 0x12, 0x71, 0xd1, 0x68, 0xb7, 0x95, 0x8c, 0x8a, 0xc4, 0xf3, 0x47, 0xa0, 0x9b, - 0x5f, 0x0c, 0xa8, 0xf6, 0xbf, 0xd5, 0x00, 0xc5, 0x11, 0x10, 0xf4, 0x32, 0x59, 0x43, 0x22, 0xe2, - 0xda, 0x7c, 0x75, 0x3d, 0xe6, 0xb4, 0x12, 0x11, 0x98, 0xd5, 0x65, 0x33, 0x86, 0x1f, 0xa8, 0x61, - 0xbf, 0xd0, 0xa0, 0x1a, 0x82, 0x50, 0xd0, 0xd3, 0x94, 0x3d, 0x8e, 0x80, 0xb6, 0xcd, 0x67, 0x57, - 0xf2, 0xa5, 0x75, 0x62, 0xca, 0x89, 0x90, 0x8d, 0xf8, 0x5f, 0x6a, 0x50, 0x0b, 0xc3, 0x2e, 0x28, - 0x45, 0x7e, 0x0c, 0xf8, 
0x6d, 0x2e, 0x5f, 0xcd, 0x78, 0xf5, 0x56, 0x05, 0xbd, 0xf9, 0x10, 0x0a, - 0x02, 0xac, 0x49, 0x0a, 0x88, 0x30, 0x6c, 0x9c, 0x14, 0x10, 0x11, 0xa4, 0x27, 0x25, 0x20, 0x1c, - 0xbb, 0x4f, 0x94, 0x10, 0x14, 0x88, 0x4e, 0x9a, 0xc6, 0xc9, 0x21, 0x18, 0x81, 0x83, 0x26, 0x69, - 0x0c, 0x42, 0x50, 0xc2, 0x39, 0x28, 0x45, 0xe0, 0x15, 0x21, 0x18, 0x45, 0x83, 0x52, 0x42, 0x90, - 0x29, 0x55, 0x42, 0x30, 0x00, 0x5f, 0x92, 0x42, 0x30, 0x86, 0x88, 0x27, 0x85, 0x60, 0x1c, 0xbf, - 0x49, 0xd9, 0x57, 0xa6, 0x3b, 0x14, 0x82, 0xf3, 0x09, 0x58, 0x0d, 0x7a, 0x95, 0xe2, 0xd0, 0x44, - 0xb0, 0xbd, 0xf9, 0xc9, 0x35, 0xb9, 0x27, 0x9e, 0x7d, 0xbe, 0x15, 0xf2, 0xec, 0xff, 0x83, 0x06, - 0x0b, 0x49, 0x58, 0x0f, 0x4a, 0xd1, 0x95, 0x02, 0xd4, 0x37, 0x57, 0xae, 0xcb, 0x7e, 0xb5, 0xd7, - 0xfc, 0x68, 0x78, 0x53, 0xff, 0x97, 0x2f, 0x16, 0xb5, 0x7f, 0xfb, 0x62, 0x51, 0xfb, 0x8f, 0x2f, - 0x16, 0xb5, 0xbf, 0xfb, 0xcf, 0xc5, 0x99, 0xe3, 0x3c, 0xfb, 0x0f, 0x64, 0xdf, 0xfb, 0x4d, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x80, 0x82, 0x49, 0x96, 0xd9, 0x36, 0x00, 0x00, + // 3738 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0xdd, 0x6f, 0x1b, 0xc7, + 0x76, 0xd7, 0x92, 0xe2, 0xd7, 0xe1, 0x87, 0xa8, 0x91, 0x64, 0xd3, 0xb4, 0x2d, 0xcb, 0x63, 0x3b, + 0x56, 0xec, 0x44, 0x4c, 0x94, 0xa4, 0x05, 0xdc, 0x36, 0x88, 0x2c, 0x31, 0x96, 0x22, 0x59, 0x52, + 0x56, 0x94, 0xf3, 0x81, 0xa0, 0xc2, 0x8a, 0x1c, 0x49, 0x5b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, + 0xd2, 0xa2, 0x29, 0x82, 0xf4, 0xa1, 0x05, 0xfa, 0x92, 0x00, 0x45, 0xfb, 0xd0, 0xa7, 0xa2, 0x28, + 0xf2, 0x50, 0xa0, 0x2f, 0xc1, 0x05, 0xee, 0x5f, 0x70, 0xdf, 0xee, 0x05, 0xee, 0x3f, 0x70, 0x91, + 0x9b, 0x97, 0xfb, 0x5f, 0x5c, 0xcc, 0xd7, 0xee, 0xec, 0x72, 0x57, 0x72, 0xc2, 0x24, 0x2f, 0xf2, + 0xce, 0xcc, 0x99, 0xf3, 0x3b, 0x73, 0x66, 0xe6, 0x9c, 0x99, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xed, + 0xa5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, 0x11, 0xaf, 0xdd, 0x71, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, + 0x3e, 0x7b, 0x6c, 0x1f, 0xdb, 0xac, 
0xa1, 0x41, 0xbf, 0xb8, 0x4c, 0xfd, 0x1a, 0x95, 0x69, 0xf4, + 0x86, 0xed, 0x36, 0xfb, 0xd3, 0x3f, 0x6c, 0x9c, 0x0e, 0x45, 0xd3, 0x75, 0xd6, 0x64, 0x0c, 0xbc, + 0x13, 0xf6, 0xa7, 0x7f, 0xc8, 0xfe, 0x11, 0x8d, 0x37, 0x8e, 0x6d, 0xfb, 0xb8, 0x4b, 0x1a, 0x46, + 0xdf, 0x6c, 0x18, 0x96, 0x65, 0x7b, 0x86, 0x67, 0xda, 0x96, 0xcb, 0x5b, 0xf1, 0x3f, 0x6b, 0x50, + 0xd1, 0x89, 0xdb, 0xb7, 0x2d, 0x97, 0xac, 0x13, 0xa3, 0x43, 0x1c, 0x74, 0x13, 0xa0, 0xdd, 0x1d, + 0xb8, 0x1e, 0x71, 0x0e, 0xcc, 0x4e, 0x4d, 0x5b, 0xd0, 0x16, 0x27, 0xf5, 0x82, 0xa8, 0xd9, 0xe8, + 0xa0, 0xeb, 0x50, 0xe8, 0x91, 0xde, 0x21, 0x6f, 0x4d, 0xb1, 0xd6, 0x3c, 0xaf, 0xd8, 0xe8, 0xa0, + 0x3a, 0xe4, 0x1d, 0x32, 0x34, 0x5d, 0xd3, 0xb6, 0x6a, 0xe9, 0x05, 0x6d, 0x31, 0xad, 0xfb, 0x65, + 0xda, 0xd1, 0x31, 0x8e, 0xbc, 0x03, 0x8f, 0x38, 0xbd, 0xda, 0x24, 0xef, 0x48, 0x2b, 0x5a, 0xc4, + 0xe9, 0xe1, 0x2f, 0x33, 0x50, 0xd2, 0x0d, 0xeb, 0x98, 0xe8, 0xe4, 0xd3, 0x01, 0x71, 0x3d, 0x54, + 0x85, 0xf4, 0x29, 0x39, 0x67, 0xf0, 0x25, 0x9d, 0x7e, 0xf2, 0xfe, 0xd6, 0x31, 0x39, 0x20, 0x16, + 0x07, 0x2e, 0xd1, 0xfe, 0xd6, 0x31, 0x69, 0x5a, 0x1d, 0x34, 0x0b, 0x99, 0xae, 0xd9, 0x33, 0x3d, + 0x81, 0xca, 0x0b, 0x21, 0x73, 0x26, 0x23, 0xe6, 0xac, 0x02, 0xb8, 0xb6, 0xe3, 0x1d, 0xd8, 0x4e, + 0x87, 0x38, 0xb5, 0xcc, 0x82, 0xb6, 0x58, 0x59, 0xbe, 0xbb, 0xa4, 0x4e, 0xc4, 0x92, 0x6a, 0xd0, + 0xd2, 0x9e, 0xed, 0x78, 0x3b, 0x54, 0x56, 0x2f, 0xb8, 0xf2, 0x13, 0xbd, 0x0b, 0x45, 0xa6, 0xc4, + 0x33, 0x9c, 0x63, 0xe2, 0xd5, 0xb2, 0x4c, 0xcb, 0xbd, 0x4b, 0xb4, 0xb4, 0x98, 0xb0, 0xce, 0xe0, + 0xf9, 0x37, 0xc2, 0x50, 0x72, 0x89, 0x63, 0x1a, 0x5d, 0xf3, 0x33, 0xe3, 0xb0, 0x4b, 0x6a, 0xb9, + 0x05, 0x6d, 0x31, 0xaf, 0x87, 0xea, 0xe8, 0xf8, 0x4f, 0xc9, 0xb9, 0x7b, 0x60, 0x5b, 0xdd, 0xf3, + 0x5a, 0x9e, 0x09, 0xe4, 0x69, 0xc5, 0x8e, 0xd5, 0x3d, 0x67, 0x93, 0x66, 0x0f, 0x2c, 0x8f, 0xb7, + 0x16, 0x58, 0x6b, 0x81, 0xd5, 0xb0, 0xe6, 0x45, 0xa8, 0xf6, 0x4c, 0xeb, 0xa0, 0x67, 0x77, 0x0e, + 0x7c, 0x87, 0x00, 0x73, 0x48, 0xa5, 0x67, 0x5a, 0x4f, 0xed, 0x8e, 0x2e, 
0xdd, 0x42, 0x25, 0x8d, + 0xb3, 0xb0, 0x64, 0x51, 0x48, 0x1a, 0x67, 0xaa, 0xe4, 0x12, 0xcc, 0x50, 0x9d, 0x6d, 0x87, 0x18, + 0x1e, 0x09, 0x84, 0x4b, 0x4c, 0x78, 0xba, 0x67, 0x5a, 0xab, 0xac, 0x25, 0x24, 0x6f, 0x9c, 0x8d, + 0xc8, 0x97, 0x85, 0xbc, 0x71, 0x16, 0x96, 0xc7, 0x4b, 0x50, 0xf0, 0x7d, 0x8e, 0xf2, 0x30, 0xb9, + 0xbd, 0xb3, 0xdd, 0xac, 0x4e, 0x20, 0x80, 0xec, 0xca, 0xde, 0x6a, 0x73, 0x7b, 0xad, 0xaa, 0xa1, + 0x22, 0xe4, 0xd6, 0x9a, 0xbc, 0x90, 0xc2, 0x8f, 0x01, 0x02, 0xef, 0xa2, 0x1c, 0xa4, 0x37, 0x9b, + 0x1f, 0x55, 0x27, 0xa8, 0xcc, 0xb3, 0xa6, 0xbe, 0xb7, 0xb1, 0xb3, 0x5d, 0xd5, 0x68, 0xe7, 0x55, + 0xbd, 0xb9, 0xd2, 0x6a, 0x56, 0x53, 0x54, 0xe2, 0xe9, 0xce, 0x5a, 0x35, 0x8d, 0x0a, 0x90, 0x79, + 0xb6, 0xb2, 0xb5, 0xdf, 0xac, 0x4e, 0xe2, 0xaf, 0x35, 0x28, 0x8b, 0xf9, 0xe2, 0x7b, 0x02, 0xbd, + 0x09, 0xd9, 0x13, 0xb6, 0x2f, 0xd8, 0x52, 0x2c, 0x2e, 0xdf, 0x88, 0x4c, 0x6e, 0x68, 0xef, 0xe8, + 0x42, 0x16, 0x61, 0x48, 0x9f, 0x0e, 0xdd, 0x5a, 0x6a, 0x21, 0xbd, 0x58, 0x5c, 0xae, 0x2e, 0xf1, + 0x0d, 0xbb, 0xb4, 0x49, 0xce, 0x9f, 0x19, 0xdd, 0x01, 0xd1, 0x69, 0x23, 0x42, 0x30, 0xd9, 0xb3, + 0x1d, 0xc2, 0x56, 0x6c, 0x5e, 0x67, 0xdf, 0x74, 0x19, 0xb3, 0x49, 0x13, 0xab, 0x95, 0x17, 0xf0, + 0x37, 0x1a, 0xc0, 0xee, 0xc0, 0x4b, 0xde, 0x1a, 0xb3, 0x90, 0x19, 0x52, 0xc5, 0x62, 0x5b, 0xf0, + 0x02, 0xdb, 0x13, 0xc4, 0x70, 0x89, 0xbf, 0x27, 0x68, 0x01, 0x5d, 0x85, 0x5c, 0xdf, 0x21, 0xc3, + 0x83, 0xd3, 0x21, 0x03, 0xc9, 0xeb, 0x59, 0x5a, 0xdc, 0x1c, 0xa2, 0xdb, 0x50, 0x32, 0x8f, 0x2d, + 0xdb, 0x21, 0x07, 0x5c, 0x57, 0x86, 0xb5, 0x16, 0x79, 0x1d, 0xb3, 0x5b, 0x11, 0xe1, 0x8a, 0xb3, + 0xaa, 0xc8, 0x16, 0xad, 0xc2, 0x16, 0x14, 0x99, 0xa9, 0x63, 0xb9, 0xef, 0xe5, 0xc0, 0xc6, 0x14, + 0xeb, 0x36, 0xea, 0x42, 0x61, 0x35, 0xfe, 0x04, 0xd0, 0x1a, 0xe9, 0x12, 0x8f, 0x8c, 0x13, 0x3d, + 0x14, 0x9f, 0xa4, 0x55, 0x9f, 0xe0, 0xaf, 0x34, 0x98, 0x09, 0xa9, 0x1f, 0x6b, 0x58, 0x35, 0xc8, + 0x75, 0x98, 0x32, 0x6e, 0x41, 0x5a, 0x97, 0x45, 0xf4, 0x10, 0xf2, 0xc2, 0x00, 0xb7, 0x96, 0x4e, + 0x58, 0x34, 
0x39, 0x6e, 0x93, 0x8b, 0xbf, 0x49, 0x41, 0x41, 0x0c, 0x74, 0xa7, 0x8f, 0x56, 0xa0, + 0xec, 0xf0, 0xc2, 0x01, 0x1b, 0x8f, 0xb0, 0xa8, 0x9e, 0x1c, 0x84, 0xd6, 0x27, 0xf4, 0x92, 0xe8, + 0xc2, 0xaa, 0xd1, 0x5f, 0x41, 0x51, 0xaa, 0xe8, 0x0f, 0x3c, 0xe1, 0xf2, 0x5a, 0x58, 0x41, 0xb0, + 0xfe, 0xd6, 0x27, 0x74, 0x10, 0xe2, 0xbb, 0x03, 0x0f, 0xb5, 0x60, 0x56, 0x76, 0xe6, 0xa3, 0x11, + 0x66, 0xa4, 0x99, 0x96, 0x85, 0xb0, 0x96, 0xd1, 0xa9, 0x5a, 0x9f, 0xd0, 0x91, 0xe8, 0xaf, 0x34, + 0xaa, 0x26, 0x79, 0x67, 0x3c, 0x78, 0x8f, 0x98, 0xd4, 0x3a, 0xb3, 0x46, 0x4d, 0x6a, 0x9d, 0x59, + 0x8f, 0x0b, 0x90, 0x13, 0x25, 0xfc, 0xab, 0x14, 0x80, 0x9c, 0x8d, 0x9d, 0x3e, 0x5a, 0x83, 0x8a, + 0x23, 0x4a, 0x21, 0x6f, 0x5d, 0x8f, 0xf5, 0x96, 0x98, 0xc4, 0x09, 0xbd, 0x2c, 0x3b, 0x71, 0xe3, + 0xde, 0x86, 0x92, 0xaf, 0x25, 0x70, 0xd8, 0xb5, 0x18, 0x87, 0xf9, 0x1a, 0x8a, 0xb2, 0x03, 0x75, + 0xd9, 0x07, 0x30, 0xe7, 0xf7, 0x8f, 0xf1, 0xd9, 0xed, 0x0b, 0x7c, 0xe6, 0x2b, 0x9c, 0x91, 0x1a, + 0x54, 0xaf, 0xa9, 0x86, 0x05, 0x6e, 0xbb, 0x16, 0xe3, 0xb6, 0x51, 0xc3, 0xa8, 0xe3, 0x80, 0xe6, + 0x4b, 0x5e, 0xc4, 0x7f, 0x4a, 0x43, 0x6e, 0xd5, 0xee, 0xf5, 0x0d, 0x87, 0xce, 0x46, 0xd6, 0x21, + 0xee, 0xa0, 0xeb, 0x31, 0x77, 0x55, 0x96, 0xef, 0x84, 0x35, 0x0a, 0x31, 0xf9, 0xaf, 0xce, 0x44, + 0x75, 0xd1, 0x85, 0x76, 0x16, 0xe9, 0x31, 0xf5, 0x02, 0x9d, 0x45, 0x72, 0x14, 0x5d, 0xe4, 0x46, + 0x4e, 0x07, 0x1b, 0xb9, 0x0e, 0xb9, 0x21, 0x71, 0x82, 0x94, 0xbe, 0x3e, 0xa1, 0xcb, 0x0a, 0xf4, + 0x32, 0x4c, 0x45, 0xd3, 0x4b, 0x46, 0xc8, 0x54, 0xda, 0xe1, 0x6c, 0x74, 0x07, 0x4a, 0xa1, 0x1c, + 0x97, 0x15, 0x72, 0xc5, 0x9e, 0x92, 0xe2, 0xae, 0xc8, 0xb8, 0x4a, 0xf3, 0x71, 0x69, 0x7d, 0x42, + 0x46, 0xd6, 0x2b, 0x32, 0xb2, 0xe6, 0x45, 0x2f, 0x11, 0x5b, 0x43, 0x41, 0xe6, 0x9d, 0x70, 0x90, + 0xc1, 0xef, 0x40, 0x39, 0xe4, 0x20, 0x9a, 0x77, 0x9a, 0xef, 0xef, 0xaf, 0x6c, 0xf1, 0x24, 0xf5, + 0x84, 0xe5, 0x25, 0xbd, 0xaa, 0xd1, 0x5c, 0xb7, 0xd5, 0xdc, 0xdb, 0xab, 0xa6, 0x50, 0x19, 0x0a, + 0xdb, 0x3b, 0xad, 0x03, 0x2e, 0x95, 0xc6, 0x4f, 
0x7c, 0x0d, 0x22, 0xc9, 0x29, 0xb9, 0x6d, 0x42, + 0xc9, 0x6d, 0x9a, 0xcc, 0x6d, 0xa9, 0x20, 0xb7, 0xb1, 0x34, 0xb7, 0xd5, 0x5c, 0xd9, 0x6b, 0x56, + 0x27, 0x1f, 0x57, 0xa0, 0xc4, 0xfd, 0x7b, 0x30, 0xb0, 0x68, 0xaa, 0xfd, 0x6f, 0x0d, 0x20, 0xd8, + 0x4d, 0xa8, 0x01, 0xb9, 0x36, 0xc7, 0xa9, 0x69, 0x2c, 0x18, 0xcd, 0xc5, 0x4e, 0x99, 0x2e, 0xa5, + 0xd0, 0xeb, 0x90, 0x73, 0x07, 0xed, 0x36, 0x71, 0x65, 0xca, 0xbb, 0x1a, 0x8d, 0x87, 0x22, 0x5a, + 0xe9, 0x52, 0x8e, 0x76, 0x39, 0x32, 0xcc, 0xee, 0x80, 0x25, 0xc0, 0x8b, 0xbb, 0x08, 0x39, 0xfc, + 0x9f, 0x1a, 0x14, 0x95, 0xc5, 0xfb, 0x23, 0x83, 0xf0, 0x0d, 0x28, 0x30, 0x1b, 0x48, 0x47, 0x84, + 0xe1, 0xbc, 0x1e, 0x54, 0xa0, 0xbf, 0x80, 0x82, 0xdc, 0x01, 0x32, 0x12, 0xd7, 0xe2, 0xd5, 0xee, + 0xf4, 0xf5, 0x40, 0x14, 0x6f, 0xc2, 0x34, 0xf3, 0x4a, 0x9b, 0x1e, 0xae, 0xa5, 0x1f, 0xd5, 0xe3, + 0xa7, 0x16, 0x39, 0x7e, 0xd6, 0x21, 0xdf, 0x3f, 0x39, 0x77, 0xcd, 0xb6, 0xd1, 0x15, 0x56, 0xf8, + 0x65, 0xfc, 0x1e, 0x20, 0x55, 0xd9, 0x38, 0xc3, 0xc5, 0x65, 0x28, 0xae, 0x1b, 0xee, 0x89, 0x30, + 0x09, 0x3f, 0x84, 0x32, 0x2d, 0x6e, 0x3e, 0x7b, 0x01, 0x1b, 0xd9, 0xe5, 0x40, 0x4a, 0x8f, 0xe5, + 0x73, 0x04, 0x93, 0x27, 0x86, 0x7b, 0xc2, 0x06, 0x5a, 0xd6, 0xd9, 0x37, 0x7a, 0x19, 0xaa, 0x6d, + 0x3e, 0xc8, 0x83, 0xc8, 0x95, 0x61, 0x4a, 0xd4, 0xfb, 0x27, 0xc1, 0x0f, 0xa1, 0xc4, 0xc7, 0xf0, + 0x53, 0x1b, 0x81, 0xa7, 0x61, 0x6a, 0xcf, 0x32, 0xfa, 0xee, 0x89, 0x2d, 0xb3, 0x1b, 0x1d, 0x74, + 0x35, 0xa8, 0x1b, 0x0b, 0xf1, 0x3e, 0x4c, 0x39, 0xa4, 0x67, 0x98, 0x96, 0x69, 0x1d, 0x1f, 0x1c, + 0x9e, 0x7b, 0xc4, 0x15, 0x17, 0xa6, 0x8a, 0x5f, 0xfd, 0x98, 0xd6, 0x52, 0xd3, 0x0e, 0xbb, 0xf6, + 0xa1, 0x08, 0x73, 0xec, 0x1b, 0x7f, 0xab, 0x41, 0xe9, 0x03, 0xc3, 0x6b, 0xcb, 0xa9, 0x43, 0x1b, + 0x50, 0xf1, 0x83, 0x1b, 0xab, 0x11, 0xb6, 0x44, 0x52, 0x2c, 0xeb, 0x23, 0x8f, 0xd2, 0x32, 0x3b, + 0x96, 0xdb, 0x6a, 0x05, 0x53, 0x65, 0x58, 0x6d, 0xd2, 0xf5, 0x55, 0xa5, 0x92, 0x55, 0x31, 0x41, + 0x55, 0x95, 0x5a, 0xf1, 0x78, 0x2a, 0x38, 0x7e, 0xf0, 0x58, 0xf2, 0x6d, 0x0a, 0xd0, 
0xa8, 0x0d, + 0x3f, 0xf4, 0x44, 0x76, 0x0f, 0x2a, 0xae, 0x67, 0x38, 0x23, 0x6b, 0xa3, 0xcc, 0x6a, 0xfd, 0x00, + 0x7d, 0x1f, 0xa6, 0xfa, 0x8e, 0x7d, 0xec, 0x10, 0xd7, 0x3d, 0xb0, 0x6c, 0xcf, 0x3c, 0x3a, 0x17, + 0x87, 0xda, 0x8a, 0xac, 0xde, 0x66, 0xb5, 0xa8, 0x09, 0xb9, 0x23, 0xb3, 0xeb, 0x11, 0xc7, 0xad, + 0x65, 0x16, 0xd2, 0x8b, 0x95, 0xe5, 0x87, 0x97, 0x79, 0x6d, 0xe9, 0x5d, 0x26, 0xdf, 0x3a, 0xef, + 0x13, 0x5d, 0xf6, 0x55, 0x0f, 0x8a, 0xd9, 0xd0, 0xe1, 0xf9, 0x1a, 0xe4, 0x9f, 0x53, 0x15, 0xf4, + 0x52, 0x9c, 0xe3, 0x67, 0x3b, 0x56, 0xde, 0xe8, 0xe0, 0x7b, 0x00, 0x81, 0x2a, 0x1a, 0x85, 0xb7, + 0x77, 0x76, 0xf7, 0x5b, 0xd5, 0x09, 0x54, 0x82, 0xfc, 0xf6, 0xce, 0x5a, 0x73, 0xab, 0x49, 0x43, + 0x36, 0x6e, 0x48, 0xb7, 0xa9, 0xee, 0x0d, 0xe9, 0xd5, 0xc2, 0x7a, 0xff, 0x2d, 0x05, 0x65, 0xb1, + 0x40, 0xc6, 0x5a, 0xa5, 0x2a, 0x44, 0x2a, 0x04, 0x41, 0x0f, 0xac, 0x7c, 0xe1, 0x74, 0xc4, 0xb9, + 0x58, 0x16, 0x69, 0xd8, 0xe0, 0xeb, 0x80, 0x74, 0x84, 0xc7, 0xfd, 0x72, 0xec, 0xce, 0xce, 0xc4, + 0xee, 0x6c, 0x74, 0x07, 0xca, 0xfe, 0x42, 0x34, 0x5c, 0x91, 0x86, 0x0b, 0x7a, 0x49, 0xae, 0x31, + 0x5a, 0x87, 0xee, 0x41, 0x96, 0x0c, 0x89, 0xe5, 0xb9, 0xb5, 0x22, 0x0b, 0xc8, 0x65, 0x79, 0x34, + 0x6e, 0xd2, 0x5a, 0x5d, 0x34, 0xe2, 0xb7, 0x60, 0x9a, 0x5d, 0x41, 0x9e, 0x38, 0x86, 0xa5, 0xde, + 0x95, 0x5a, 0xad, 0x2d, 0xe1, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, 0x58, 0x13, 0x03, 0x4d, 0x6d, + 0xac, 0xe1, 0x2f, 0x34, 0x40, 0x6a, 0xbf, 0xb1, 0x7c, 0x19, 0x51, 0x2e, 0xe1, 0xd3, 0x01, 0xfc, + 0x2c, 0x64, 0x88, 0xe3, 0xd8, 0x0e, 0xf3, 0x5a, 0x41, 0xe7, 0x05, 0x7c, 0x57, 0xd8, 0xa0, 0x93, + 0xa1, 0x7d, 0xea, 0xef, 0x19, 0xae, 0x4d, 0xf3, 0x4d, 0xdd, 0x84, 0x99, 0x90, 0xd4, 0x58, 0x89, + 0xe1, 0x3e, 0xcc, 0x31, 0x65, 0x9b, 0x84, 0xf4, 0x57, 0xba, 0xe6, 0x30, 0x11, 0xb5, 0x0f, 0x57, + 0xa2, 0x82, 0x3f, 0xaf, 0x8f, 0xf0, 0x5f, 0x0b, 0xc4, 0x96, 0xd9, 0x23, 0x2d, 0x7b, 0x2b, 0xd9, + 0x36, 0x1a, 0x38, 0x4f, 0xc9, 0xb9, 0x2b, 0x32, 0x28, 0xfb, 0xc6, 0xff, 0xa3, 0xc1, 0xd5, 0x91, + 0xee, 0x3f, 0xf3, 0xac, 
0xce, 0x03, 0x1c, 0xd3, 0xe5, 0x43, 0x3a, 0xb4, 0x81, 0x5f, 0xde, 0x95, + 0x1a, 0xdf, 0x4e, 0x1a, 0x7b, 0x4a, 0xc2, 0xce, 0x59, 0x31, 0xe7, 0xec, 0x8f, 0x2b, 0xd3, 0xcf, + 0x4d, 0x28, 0xb2, 0x8a, 0x3d, 0xcf, 0xf0, 0x06, 0xee, 0xc8, 0x64, 0xfc, 0xa3, 0x58, 0x02, 0xb2, + 0xd3, 0x58, 0xe3, 0x7a, 0x1d, 0xb2, 0xec, 0xdc, 0x2a, 0x4f, 0x6d, 0x91, 0x8b, 0x82, 0x62, 0x87, + 0x2e, 0x04, 0xf1, 0x09, 0x64, 0x9f, 0x32, 0xb2, 0x4f, 0xb1, 0x6c, 0x52, 0x4e, 0x85, 0x65, 0xf4, + 0x38, 0x05, 0x51, 0xd0, 0xd9, 0x37, 0x3b, 0xe4, 0x10, 0xe2, 0xec, 0xeb, 0x5b, 0xfc, 0x30, 0x55, + 0xd0, 0xfd, 0x32, 0x75, 0x59, 0xbb, 0x6b, 0x12, 0xcb, 0x63, 0xad, 0x93, 0xac, 0x55, 0xa9, 0xc1, + 0x4b, 0x50, 0xe5, 0x48, 0x2b, 0x9d, 0x8e, 0x72, 0x58, 0xf1, 0xf5, 0x69, 0x61, 0x7d, 0xf8, 0x7f, + 0x35, 0x98, 0x56, 0x3a, 0x8c, 0xe5, 0x98, 0x57, 0x20, 0xcb, 0x29, 0x4d, 0x91, 0x17, 0x67, 0xc3, + 0xbd, 0x38, 0x8c, 0x2e, 0x64, 0xd0, 0x12, 0xe4, 0xf8, 0x97, 0x3c, 0x31, 0xc6, 0x8b, 0x4b, 0x21, + 0x7c, 0x0f, 0x66, 0x44, 0x15, 0xe9, 0xd9, 0x71, 0x6b, 0x9b, 0x39, 0x14, 0xff, 0x03, 0xcc, 0x86, + 0xc5, 0xc6, 0x1a, 0x92, 0x62, 0x64, 0xea, 0x45, 0x8c, 0x5c, 0x91, 0x46, 0xee, 0xf7, 0x3b, 0x4a, + 0x1a, 0x8f, 0xce, 0xba, 0x3a, 0x23, 0xa9, 0xc8, 0x8c, 0xf8, 0x03, 0x90, 0x2a, 0x7e, 0xd1, 0x01, + 0xcc, 0xc8, 0xe5, 0xb0, 0x65, 0xba, 0xfe, 0xe1, 0xee, 0x33, 0x40, 0x6a, 0xe5, 0x2f, 0x6d, 0xd0, + 0x1a, 0x39, 0x72, 0x8c, 0xe3, 0x1e, 0xf1, 0xf3, 0x13, 0x3d, 0xea, 0xab, 0x95, 0x63, 0x45, 0xf4, + 0x06, 0x4c, 0x3f, 0xb5, 0x87, 0x34, 0x34, 0xd0, 0xda, 0x60, 0xcb, 0xf0, 0xab, 0x9e, 0x3f, 0x6d, + 0x7e, 0x99, 0x82, 0xab, 0x1d, 0xc6, 0x02, 0xff, 0xad, 0x06, 0xa5, 0x95, 0xae, 0xe1, 0xf4, 0x24, + 0xf0, 0xdb, 0x90, 0xe5, 0x17, 0x18, 0xc1, 0x19, 0xbc, 0x14, 0x56, 0xa3, 0xca, 0xf2, 0xc2, 0x0a, + 0xbf, 0xee, 0x88, 0x5e, 0xd4, 0x70, 0xf1, 0xac, 0xb0, 0x16, 0x79, 0x66, 0x58, 0x43, 0xaf, 0x42, + 0xc6, 0xa0, 0x5d, 0x58, 0x08, 0xae, 0x44, 0xaf, 0x8e, 0x4c, 0x1b, 0x3b, 0xb7, 0x71, 0x29, 0xfc, + 0x26, 0x14, 0x15, 0x04, 0x7a, 0x39, 0x7e, 0xd2, 0x14, 0x07, 
0xb0, 0x95, 0xd5, 0xd6, 0xc6, 0x33, + 0x7e, 0x67, 0xae, 0x00, 0xac, 0x35, 0xfd, 0x72, 0x0a, 0x7f, 0x28, 0x7a, 0x89, 0x78, 0xa7, 0xda, + 0xa3, 0x25, 0xd9, 0x93, 0x7a, 0x21, 0x7b, 0xce, 0xa0, 0x2c, 0x86, 0x3f, 0x6e, 0xf8, 0x66, 0xfa, + 0x12, 0xc2, 0xb7, 0x62, 0xbc, 0x2e, 0x04, 0xf1, 0x14, 0x94, 0x45, 0x40, 0x17, 0xeb, 0xef, 0xff, + 0x53, 0x50, 0x91, 0x35, 0xe3, 0x72, 0x9b, 0x92, 0x96, 0xe1, 0x19, 0xc0, 0x27, 0x65, 0xae, 0x40, + 0xb6, 0x73, 0xb8, 0x67, 0x7e, 0x26, 0x79, 0x68, 0x51, 0xa2, 0xf5, 0x5d, 0x8e, 0xc3, 0x1f, 0x83, + 0x44, 0x89, 0x5e, 0xd0, 0x1d, 0xe3, 0xc8, 0xdb, 0xb0, 0x3a, 0xe4, 0x8c, 0x9d, 0x1b, 0x27, 0xf5, + 0xa0, 0x82, 0xdd, 0x57, 0xc5, 0xa3, 0x11, 0x3b, 0x2c, 0x2a, 0x8f, 0x48, 0xe8, 0x01, 0x54, 0xe9, + 0xf7, 0x4a, 0xbf, 0xdf, 0x35, 0x49, 0x87, 0x2b, 0xc8, 0x31, 0x99, 0x91, 0x7a, 0x8a, 0xce, 0x8e, + 0x5e, 0x6e, 0x2d, 0xcf, 0xc2, 0x96, 0x28, 0xa1, 0x05, 0x28, 0x72, 0xfb, 0x36, 0xac, 0x7d, 0x97, + 0xb0, 0x97, 0x94, 0xb4, 0xae, 0x56, 0xd1, 0x7d, 0xbc, 0x32, 0xf0, 0x4e, 0x9a, 0x96, 0x71, 0xd8, + 0x95, 0x71, 0x91, 0x26, 0x73, 0x5a, 0xb9, 0x66, 0xba, 0x6a, 0x6d, 0x13, 0x66, 0x68, 0x2d, 0xb1, + 0x3c, 0xb3, 0xad, 0x04, 0x51, 0x99, 0x2a, 0xb5, 0x48, 0xaa, 0x34, 0x5c, 0xf7, 0xb9, 0xed, 0x74, + 0x84, 0x03, 0xfd, 0x32, 0x5e, 0xe3, 0xca, 0xf7, 0xdd, 0x50, 0x32, 0xfc, 0xa1, 0x5a, 0x16, 0x03, + 0x2d, 0x4f, 0x88, 0x77, 0x81, 0x16, 0xfc, 0x10, 0xe6, 0xa4, 0xa4, 0x60, 0x17, 0x2f, 0x10, 0xde, + 0x81, 0x9b, 0x52, 0x78, 0xf5, 0x84, 0x5e, 0xdf, 0x76, 0x05, 0xe0, 0x8f, 0xb5, 0xf3, 0x31, 0xd4, + 0x7c, 0x3b, 0xd9, 0x91, 0xdc, 0xee, 0xaa, 0x06, 0x0c, 0x5c, 0xb1, 0x32, 0x0b, 0x3a, 0xfb, 0xa6, + 0x75, 0x8e, 0xdd, 0xf5, 0x0f, 0x1e, 0xf4, 0x1b, 0xaf, 0xc2, 0x35, 0xa9, 0x43, 0x1c, 0x96, 0xc3, + 0x4a, 0x46, 0x0c, 0x8a, 0x53, 0x22, 0x1c, 0x46, 0xbb, 0x5e, 0xec, 0x76, 0x55, 0x32, 0xec, 0x5a, + 0xa6, 0x53, 0x53, 0x74, 0xce, 0xf1, 0x15, 0x41, 0x0d, 0x53, 0xf3, 0x92, 0xa8, 0xa6, 0x0a, 0xd4, + 0x6a, 0x31, 0x11, 0xb4, 0x7a, 0x64, 0x22, 0x46, 0x54, 0x7f, 0x02, 0xf3, 0xbe, 0x11, 0xd4, 0x6f, + 
0xbb, 0xc4, 0xe9, 0x99, 0xae, 0xab, 0xf0, 0x51, 0x71, 0x03, 0x7f, 0x09, 0x26, 0xfb, 0x44, 0x44, + 0xae, 0xe2, 0x32, 0x5a, 0xe2, 0x0f, 0xc8, 0x4b, 0x4a, 0x67, 0xd6, 0x8e, 0x3b, 0x70, 0x4b, 0x6a, + 0xe7, 0x1e, 0x8d, 0x55, 0x1f, 0x35, 0x4a, 0x5e, 0xfb, 0x53, 0x09, 0xd7, 0xfe, 0x74, 0x84, 0x23, + 0x7d, 0x8f, 0x3b, 0x52, 0xee, 0xad, 0xb1, 0x32, 0xd2, 0x26, 0xf7, 0xa9, 0xbf, 0x25, 0xc7, 0x52, + 0x76, 0x08, 0xb3, 0xe1, 0x9d, 0x3c, 0x56, 0xb0, 0x9c, 0x85, 0x8c, 0x67, 0x9f, 0x12, 0x19, 0x2a, + 0x79, 0x41, 0x1a, 0xec, 0x6f, 0xf3, 0xb1, 0x0c, 0x36, 0x02, 0x65, 0x6c, 0x49, 0x8e, 0x6b, 0x2f, + 0x9d, 0x4d, 0x79, 0xc4, 0xe3, 0x05, 0xbc, 0x0d, 0x57, 0xa2, 0x61, 0x62, 0x2c, 0x93, 0x9f, 0xf1, + 0x05, 0x1c, 0x17, 0x49, 0xc6, 0xd2, 0xfb, 0x7e, 0x10, 0x0c, 0x94, 0x80, 0x32, 0x96, 0x4a, 0x1d, + 0xea, 0x71, 0xf1, 0xe5, 0xa7, 0x58, 0xaf, 0x7e, 0xb8, 0x19, 0x4b, 0x99, 0x1b, 0x28, 0x1b, 0x7f, + 0xfa, 0x83, 0x18, 0x91, 0xbe, 0x30, 0x46, 0x88, 0x4d, 0x12, 0x44, 0xb1, 0x9f, 0x61, 0xd1, 0x09, + 0x8c, 0x20, 0x80, 0x8e, 0x8b, 0x41, 0x73, 0x88, 0x8f, 0xc1, 0x0a, 0x72, 0x61, 0xab, 0x61, 0x77, + 0xac, 0xc9, 0xf8, 0x20, 0x88, 0x9d, 0x23, 0x91, 0x79, 0x2c, 0xc5, 0x1f, 0xc2, 0x42, 0x72, 0x50, + 0x1e, 0x47, 0xf3, 0x83, 0x06, 0x14, 0xfc, 0x63, 0xab, 0xf2, 0xe3, 0x8b, 0x22, 0xe4, 0xb6, 0x77, + 0xf6, 0x76, 0x57, 0x56, 0x9b, 0xfc, 0xd7, 0x17, 0xab, 0x3b, 0xba, 0xbe, 0xbf, 0xdb, 0xaa, 0xa6, + 0x96, 0xbf, 0x4f, 0x43, 0x6a, 0xf3, 0x19, 0xfa, 0x08, 0x32, 0xfc, 0x29, 0xf2, 0x82, 0xf7, 0xe7, + 0xfa, 0x45, 0xaf, 0xad, 0xf8, 0xea, 0x17, 0xbf, 0xff, 0xfe, 0xeb, 0xd4, 0x34, 0x2e, 0x35, 0x86, + 0x6f, 0x34, 0x4e, 0x87, 0x0d, 0x96, 0x1b, 0x1e, 0x69, 0x0f, 0xd0, 0xfb, 0x90, 0xde, 0x1d, 0x78, + 0x28, 0xf1, 0x5d, 0xba, 0x9e, 0xfc, 0x00, 0x8b, 0xe7, 0x98, 0xd2, 0x29, 0x0c, 0x42, 0x69, 0x7f, + 0xe0, 0x51, 0x95, 0x9f, 0x42, 0x51, 0x7d, 0x3e, 0xbd, 0xf4, 0xb1, 0xba, 0x7e, 0xf9, 0xd3, 0x2c, + 0xbe, 0xc9, 0xa0, 0xae, 0x62, 0x24, 0xa0, 0xf8, 0x03, 0xaf, 0x3a, 0x8a, 0xd6, 0x99, 0x85, 0x12, + 0x9f, 0xb2, 0xeb, 0xc9, 0xaf, 0xb5, 
0x23, 0xa3, 0xf0, 0xce, 0x2c, 0xaa, 0xf2, 0xef, 0xc4, 0x43, + 0x6d, 0xdb, 0x43, 0xb7, 0x62, 0x1e, 0xea, 0xd4, 0x27, 0xa9, 0xfa, 0x42, 0xb2, 0x80, 0x00, 0xb9, + 0xc1, 0x40, 0xae, 0xe0, 0x69, 0x01, 0xd2, 0xf6, 0x45, 0x1e, 0x69, 0x0f, 0x96, 0xdb, 0x90, 0x61, + 0x9c, 0x33, 0xfa, 0x58, 0x7e, 0xd4, 0x63, 0x88, 0xf4, 0x84, 0x89, 0x0e, 0xb1, 0xd5, 0x78, 0x96, + 0x01, 0x55, 0x70, 0x81, 0x02, 0x31, 0xc6, 0xf9, 0x91, 0xf6, 0x60, 0x51, 0x7b, 0x4d, 0x5b, 0xfe, + 0xbf, 0x0c, 0x64, 0x18, 0xf9, 0x84, 0x4e, 0x01, 0x02, 0x6e, 0x36, 0x3a, 0xba, 0x11, 0xb6, 0x37, + 0x3a, 0xba, 0x51, 0x5a, 0x17, 0xd7, 0x19, 0xe8, 0x2c, 0x9e, 0xa2, 0xa0, 0x8c, 0xd3, 0x6a, 0x30, + 0x9a, 0x8e, 0xfa, 0xf1, 0x5f, 0x34, 0xc1, 0xbd, 0xf1, 0xbd, 0x84, 0xe2, 0xb4, 0x85, 0x08, 0xda, + 0xe8, 0x72, 0x88, 0x21, 0x67, 0xf1, 0x5b, 0x0c, 0xb0, 0x81, 0xab, 0x01, 0xa0, 0xc3, 0x24, 0x1e, + 0x69, 0x0f, 0x3e, 0xae, 0xe1, 0x19, 0xe1, 0xe5, 0x48, 0x0b, 0xfa, 0x1c, 0x2a, 0x61, 0xd2, 0x15, + 0xdd, 0x89, 0xc1, 0x8a, 0x72, 0xb7, 0xf5, 0xbb, 0x17, 0x0b, 0x09, 0x9b, 0xe6, 0x99, 0x4d, 0x02, + 0x9c, 0x23, 0x9f, 0x12, 0xd2, 0x37, 0xa8, 0x90, 0x98, 0x03, 0xf4, 0x5f, 0x1a, 0x4c, 0x45, 0x58, + 0x54, 0x14, 0xa7, 0x7d, 0x84, 0xa3, 0xad, 0xdf, 0xbb, 0x44, 0x4a, 0x18, 0xf1, 0x37, 0xcc, 0x88, + 0xbf, 0xc4, 0xb3, 0x81, 0x11, 0x9e, 0xd9, 0x23, 0x9e, 0x2d, 0xac, 0xf8, 0xf8, 0x06, 0xbe, 0x1a, + 0x72, 0x4e, 0xa8, 0x35, 0x98, 0x2c, 0xce, 0x84, 0xc6, 0x4e, 0x56, 0x88, 0x59, 0x8d, 0x9d, 0xac, + 0x30, 0x8d, 0x1a, 0x37, 0x59, 0x9c, 0xf7, 0x8c, 0x9b, 0x2c, 0xbf, 0x65, 0x99, 0xfd, 0x54, 0x82, + 0xff, 0x40, 0x12, 0xd9, 0x50, 0xf0, 0x59, 0x48, 0x34, 0x1f, 0xc7, 0x08, 0x05, 0x77, 0x89, 0xfa, + 0xad, 0xc4, 0x76, 0x61, 0xd0, 0x6d, 0x66, 0xd0, 0x75, 0x7c, 0x85, 0x22, 0x8b, 0xdf, 0x60, 0x36, + 0x38, 0xed, 0xd0, 0x30, 0x3a, 0x1d, 0xea, 0x88, 0xbf, 0x87, 0x92, 0x4a, 0x13, 0xa2, 0xdb, 0xb1, + 0x2c, 0x94, 0xca, 0x34, 0xd6, 0xf1, 0x45, 0x22, 0x02, 0xf9, 0x2e, 0x43, 0x9e, 0xc7, 0xd7, 0x62, + 0x90, 0x1d, 0x26, 0x1a, 0x02, 0xe7, 0x14, 0x5f, 0x3c, 0x78, 0x88, 0x41, 
0x8c, 0x07, 0x0f, 0x33, + 0x84, 0x17, 0x82, 0x0f, 0x98, 0x28, 0x05, 0x77, 0x01, 0x02, 0x32, 0x0f, 0xc5, 0xfa, 0x52, 0xb9, + 0x4c, 0x45, 0x83, 0xc3, 0x28, 0x0f, 0x88, 0x31, 0x83, 0x15, 0xeb, 0x2e, 0x02, 0xdb, 0x35, 0x5d, + 0x1a, 0x24, 0x96, 0xff, 0x35, 0x0b, 0xc5, 0xa7, 0x86, 0x69, 0x79, 0xc4, 0x32, 0xac, 0x36, 0x41, + 0x87, 0x90, 0x61, 0x89, 0x32, 0x1a, 0x07, 0x55, 0x7e, 0x2b, 0x1a, 0x07, 0x43, 0xe4, 0x0f, 0x5e, + 0x60, 0xa8, 0x75, 0x3c, 0x47, 0x51, 0x7b, 0x81, 0xea, 0x06, 0xe3, 0x6c, 0xe8, 0x40, 0x8f, 0x20, + 0x2b, 0x9e, 0x03, 0x22, 0x8a, 0x42, 0x5c, 0x4e, 0xfd, 0x46, 0x7c, 0x63, 0xdc, 0x52, 0x52, 0x61, + 0x5c, 0x26, 0x47, 0x71, 0x86, 0x00, 0x01, 0x19, 0x19, 0x75, 0xe8, 0x08, 0x77, 0x59, 0x5f, 0x48, + 0x16, 0x10, 0x98, 0xf7, 0x18, 0xe6, 0x2d, 0x5c, 0x8f, 0x62, 0x76, 0x7c, 0x59, 0x8a, 0xfb, 0xb7, + 0x30, 0xb9, 0x6e, 0xb8, 0x27, 0x28, 0x92, 0xfa, 0x94, 0xdf, 0x2d, 0xd4, 0xeb, 0x71, 0x4d, 0x02, + 0xe5, 0x16, 0x43, 0xb9, 0xc6, 0x23, 0x89, 0x8a, 0x72, 0x62, 0xb8, 0x34, 0xa7, 0xa0, 0x0e, 0x64, + 0xf9, 0xcf, 0x18, 0xa2, 0xfe, 0x0b, 0xfd, 0x14, 0x22, 0xea, 0xbf, 0xf0, 0x2f, 0x1f, 0x2e, 0x47, + 0xe9, 0x43, 0x5e, 0xfe, 0x6e, 0x00, 0xdd, 0x8c, 0x4c, 0x45, 0xf8, 0x37, 0x06, 0xf5, 0xf9, 0xa4, + 0x66, 0x81, 0x75, 0x87, 0x61, 0xdd, 0xc4, 0xb5, 0x91, 0xb9, 0x12, 0x92, 0x8f, 0xb4, 0x07, 0xaf, + 0x69, 0xe8, 0x73, 0x80, 0x80, 0xbf, 0x1d, 0xd9, 0x00, 0x51, 0x2a, 0x78, 0x64, 0x03, 0x8c, 0x50, + 0xbf, 0x78, 0x89, 0xe1, 0x2e, 0xe2, 0x3b, 0x51, 0x5c, 0xcf, 0x31, 0x2c, 0xf7, 0x88, 0x38, 0xaf, + 0x72, 0x8e, 0xce, 0x3d, 0x31, 0xfb, 0x74, 0x33, 0xfc, 0x7a, 0x0a, 0x26, 0xe9, 0x01, 0x94, 0xe6, + 0xe9, 0xe0, 0xde, 0x1e, 0xb5, 0x64, 0x84, 0x2d, 0x8b, 0x5a, 0x32, 0x7a, 0xe5, 0x0f, 0xe7, 0x69, + 0xf6, 0xcb, 0x76, 0xc2, 0x04, 0xa8, 0xa3, 0x6d, 0x28, 0x2a, 0x17, 0x7b, 0x14, 0xa3, 0x2c, 0x4c, + 0xc3, 0x45, 0x23, 0x7f, 0x0c, 0x2b, 0x80, 0xaf, 0x33, 0xbc, 0x39, 0x1e, 0xf9, 0x19, 0x5e, 0x87, + 0x4b, 0x50, 0xc0, 0xe7, 0x50, 0x52, 0x2f, 0xff, 0x28, 0x46, 0x5f, 0x84, 0xe2, 0x8b, 0x46, 0xb9, + 0x38, 0xee, 
0x20, 0xbc, 0xf1, 0xfd, 0x5f, 0xef, 0x4b, 0x31, 0x0a, 0xdc, 0x85, 0x9c, 0x60, 0x03, + 0xe2, 0x46, 0x19, 0xe6, 0x03, 0xe3, 0x46, 0x19, 0xa1, 0x12, 0xc2, 0x67, 0x3b, 0x86, 0x48, 0x2f, + 0x3c, 0x32, 0x93, 0x08, 0xb4, 0x27, 0xc4, 0x4b, 0x42, 0x0b, 0xc8, 0xad, 0x24, 0x34, 0xe5, 0xb2, + 0x99, 0x84, 0x76, 0x4c, 0x3c, 0xb1, 0x5d, 0xe4, 0x25, 0x0e, 0x25, 0x28, 0x53, 0xa3, 0x37, 0xbe, + 0x48, 0x24, 0xee, 0xe8, 0x1d, 0x00, 0x8a, 0xd0, 0x8d, 0xce, 0x00, 0x02, 0xae, 0x22, 0x7a, 0x9e, + 0x8a, 0x25, 0x3c, 0xa3, 0xe7, 0xa9, 0x78, 0xba, 0x23, 0x1c, 0x1a, 0x02, 0x5c, 0x7e, 0xf2, 0xa7, + 0xc8, 0x5f, 0x69, 0x80, 0x46, 0x69, 0x0d, 0xf4, 0x30, 0x5e, 0x7b, 0x2c, 0x8d, 0x5a, 0x7f, 0xe5, + 0xc5, 0x84, 0xe3, 0xa2, 0x7d, 0x60, 0x52, 0x9b, 0x49, 0xf7, 0x9f, 0x53, 0xa3, 0xfe, 0x49, 0x83, + 0x72, 0x88, 0x13, 0x41, 0x2f, 0x25, 0xcc, 0x69, 0x84, 0x85, 0xad, 0xdf, 0xbf, 0x54, 0x2e, 0xee, + 0xa0, 0xa9, 0xac, 0x00, 0x79, 0xe2, 0xfe, 0x52, 0x83, 0x4a, 0x98, 0x43, 0x41, 0x09, 0xba, 0x47, + 0x58, 0xdc, 0xfa, 0xe2, 0xe5, 0x82, 0x17, 0x4f, 0x4f, 0x70, 0xd8, 0xee, 0x42, 0x4e, 0xb0, 0x2e, + 0x71, 0x0b, 0x3f, 0xcc, 0xff, 0xc6, 0x2d, 0xfc, 0x08, 0x65, 0x13, 0xb3, 0xf0, 0x1d, 0xbb, 0x4b, + 0x94, 0x6d, 0x26, 0x68, 0x99, 0x24, 0xb4, 0x8b, 0xb7, 0x59, 0x84, 0xd3, 0x49, 0x42, 0x0b, 0xb6, + 0x99, 0xe4, 0x63, 0x50, 0x82, 0xb2, 0x4b, 0xb6, 0x59, 0x94, 0xce, 0x89, 0xd9, 0x66, 0x0c, 0x50, + 0xd9, 0x66, 0x01, 0x73, 0x12, 0xb7, 0xcd, 0x46, 0xe8, 0xec, 0xb8, 0x6d, 0x36, 0x4a, 0xbe, 0xc4, + 0xcc, 0x23, 0xc3, 0x0d, 0x6d, 0xb3, 0x99, 0x18, 0x92, 0x05, 0xbd, 0x92, 0xe0, 0xc4, 0x58, 0x96, + 0xbc, 0xfe, 0xea, 0x0b, 0x4a, 0x27, 0xae, 0x71, 0xee, 0x7e, 0xb9, 0xc6, 0xff, 0x5d, 0x83, 0xd9, + 0x38, 0x82, 0x06, 0x25, 0xe0, 0x24, 0xb0, 0xeb, 0xf5, 0xa5, 0x17, 0x15, 0xbf, 0xd8, 0x5b, 0xfe, + 0xaa, 0x7f, 0x5c, 0xfd, 0xcd, 0x77, 0xf3, 0xda, 0xef, 0xbe, 0x9b, 0xd7, 0xfe, 0xf0, 0xdd, 0xbc, + 0xf6, 0x1f, 0x7f, 0x9c, 0x9f, 0x38, 0xcc, 0xb2, 0xff, 0x13, 0xf6, 0xc6, 0x9f, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x3f, 0x89, 0x92, 0xdc, 0x9a, 0x36, 
0x00, 0x00, } diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go index c86d27a8..6b005bfb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -17,8 +17,8 @@ package etcdserver import ( "time" - "github.com/coreos/etcd/internal/version" "github.com/coreos/etcd/pkg/runtime" + "github.com/coreos/etcd/version" "github.com/prometheus/client_golang/prometheus" ) @@ -30,6 +30,12 @@ var ( Name: "has_leader", Help: "Whether or not a leader exists. 1 is existence, 0 is not.", }) + isLeader = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "is_leader", + Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.", + }) leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "etcd", Subsystem: "server", @@ -77,6 +83,7 @@ var ( func init() { prometheus.MustRegister(hasLeader) + prometheus.MustRegister(isLeader) prometheus.MustRegister(leaderChanges) prometheus.MustRegister(proposalsCommitted) prometheus.MustRegister(proposalsApplied) diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go index 87126f15..1662c4b5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -16,6 +16,9 @@ package etcdserver import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) const ( @@ -57,18 +60,58 @@ const ( kvOverhead = 256 ) -func NewBackendQuota(s *EtcdServer) Quota { +func NewBackendQuota(s *EtcdServer, name string) Quota { + lg := s.getLogger() + if s.Cfg.QuotaBackendBytes < 0 { // disable quotas if negative - plog.Warningf("disabling backend quota") + if lg != nil { + lg.Info( + "disabled backend quota", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", 
s.Cfg.QuotaBackendBytes), + ) + } else { + plog.Warningf("disabling backend quota") + } return &passthroughQuota{} } + if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given + if lg != nil { + lg.Info( + "enabled backend quota with default value", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", DefaultQuotaBytes), + zap.String("quota-size", humanize.Bytes(uint64(DefaultQuotaBytes))), + ) + } return &backendQuota{s, DefaultQuotaBytes} } + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + if lg != nil { + lg.Warn( + "quota exceeds the maximum value", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), + zap.String("quota-maximum-size", humanize.Bytes(uint64(MaxQuotaBytes))), + ) + } else { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + } + } + + if lg != nil { + lg.Info( + "enabled backend quota", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + ) } return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go index ff11349f..4b3ad280 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -17,14 +17,15 @@ package etcdserver import ( "encoding/json" "expvar" + "log" "sort" "sync" - "sync/atomic" "time" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/pkg/contention" + "github.com/coreos/etcd/pkg/logutil" 
"github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft" @@ -32,7 +33,9 @@ import ( "github.com/coreos/etcd/rafthttp" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) const ( @@ -71,12 +74,6 @@ func init() { })) } -type RaftTimer interface { - Index() uint64 - AppliedIndex() uint64 - Term() uint64 -} - // apply contains entries, snapshot to be applied. Once // an apply is consumed, the entries will be persisted to // to raft storage concurrently; the application must read @@ -89,14 +86,9 @@ type apply struct { } type raftNode struct { - // Cache of the latest raft index and raft term the server has seen. - // These three unit64 fields must be the first elements to keep 64-bit - // alignment for atomic access to the fields. - index uint64 - appliedindex uint64 - term uint64 - lead uint64 + lg *zap.Logger + tickMu *sync.Mutex raftNodeConfig // a chan to send/receive snapshot @@ -118,6 +110,8 @@ type raftNode struct { } type raftNodeConfig struct { + lg *zap.Logger + // to check if msg receiver is removed from cluster isIDRemoved func(id uint64) bool raft.Node @@ -133,6 +127,8 @@ type raftNodeConfig struct { func newRaftNode(cfg raftNodeConfig) *raftNode { r := &raftNode{ + lg: cfg.lg, + tickMu: new(sync.Mutex), raftNodeConfig: cfg, // set up contention detectors for raft heartbeat message. // expect to send a heartbeat within 2 heartbeat intervals. @@ -151,6 +147,13 @@ func newRaftNode(cfg raftNodeConfig) *raftNode { return r } +// raft.Node does not have locks in Raft package +func (r *raftNode) tick() { + r.tickMu.Lock() + r.Tick() + r.tickMu.Unlock() +} + // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { @@ -163,10 +166,10 @@ func (r *raftNode) start(rh *raftReadyHandler) { for { select { case <-r.ticker.C: - r.Tick() + r.tick() case rd := <-r.Ready(): if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead if newLeader { leaderChanges.Inc() } @@ -177,8 +180,13 @@ func (r *raftNode) start(rh *raftReadyHandler) { hasLeader.Set(1) } - atomic.StoreUint64(&r.lead, rd.SoftState.Lead) + rh.updateLead(rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader + if islead { + isLeader.Set(1) + } else { + isLeader.Set(0) + } rh.updateLeadership(newLeader) r.td.Reset() } @@ -187,7 +195,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { select { case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: case <-time.After(internalTimeout): - plog.Warningf("timed out sending read state") + if r.lg != nil { + r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout)) + } else { + plog.Warningf("timed out sending read state") + } case <-r.stopped: return } @@ -218,7 +230,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { // gofail: var raftBeforeSave struct{} if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - plog.Fatalf("raft save state and entries error: %v", err) + if r.lg != nil { + r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) + } else { + plog.Fatalf("raft save state and entries error: %v", err) + } } if !raft.IsEmptyHardState(rd.HardState) { proposalsCommitted.Set(float64(rd.HardState.Commit)) @@ -228,14 +244,22 @@ func (r *raftNode) start(rh *raftReadyHandler) { if !raft.IsEmptySnap(rd.Snapshot) { // gofail: var raftBeforeSaveSnap struct{} if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - plog.Fatalf("raft save snapshot error: %v", err) + if r.lg != nil { + r.lg.Fatal("failed to save Raft 
snapshot", zap.Error(err)) + } else { + plog.Fatalf("raft save snapshot error: %v", err) + } } // etcdserver now claim the snapshot has been persisted onto the disk notifyc <- struct{}{} // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) - plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) + if r.lg != nil { + r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index)) + } else { + plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) + } // gofail: var raftAfterApplySnap struct{} } @@ -332,8 +356,16 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { ok, exceed := r.td.Observe(ms[i].To) if !ok { // TODO: limit request rate. - plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) - plog.Warningf("server is likely overloaded") + if r.lg != nil { + r.lg.Warn( + "heartbeat took too long to send out; server is overloaded, likely from slow disk", + zap.Duration("exceeded", exceed), + zap.Duration("heartbeat-interval", r.heartbeat), + ) + } else { + plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) + plog.Warningf("server is likely overloaded") + } } } } @@ -354,7 +386,11 @@ func (r *raftNode) onStop() { r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { - plog.Panicf("raft close storage error: %v", err) + if r.lg != nil { + r.lg.Panic("failed to close Raft storage", zap.Error(err)) + } else { + plog.Panicf("raft close storage error: %v", err) + } } close(r.done) } @@ -370,13 +406,13 @@ func (r *raftNode) resumeSending() { p.Resume() } -// advanceTicksForElection advances ticks to the node for fast election. 
-// This reduces the time to wait for first leader election if bootstrapping the whole -// cluster, while leaving at least 1 heartbeat for possible existing leader -// to contact it. -func advanceTicksForElection(n raft.Node, electionTicks int) { - for i := 0; i < electionTicks-1; i++ { - n.Tick() +// advanceTicks advances ticks of Raft node. +// This can be used for fast-forwarding election +// ticks in multi data-center deployments, thus +// speeding up election process. +func (r *raftNode) advanceTicks(ticks int) { + for i := 0; i < ticks; i++ { + r.tick() } } @@ -389,19 +425,36 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id ClusterID: uint64(cl.ID()), }, ) - if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { - plog.Fatalf("create wal error: %v", err) + if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil { + if cfg.Logger != nil { + cfg.Logger.Fatal("failed to create WAL", zap.Error(err)) + } else { + plog.Fatalf("create wal error: %v", err) + } } peers := make([]raft.Peer, len(ids)) for i, id := range ids { - ctx, err := json.Marshal((*cl).Member(id)) + var ctx []byte + ctx, err = json.Marshal((*cl).Member(id)) if err != nil { - plog.Panicf("marshal member should never fail: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to marshal member", zap.Error(err)) + } else { + plog.Panicf("marshal member should never fail: %v", err) + } } peers[i] = raft.Peer{ID: uint64(id), Context: ctx} } id = member.ID - plog.Infof("starting member %s in cluster %s", id, cl.ID()) + if cfg.Logger != nil { + cfg.Logger.Info( + "starting local member", + zap.String("local-member-id", id.String()), + zap.String("cluster-id", cl.ID().String()), + ) + } else { + plog.Infof("starting member %s in cluster %s", id, cl.ID()) + } s = raft.NewMemoryStorage() c := &raft.Config{ ID: uint64(id), @@ -411,13 +464,24 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id MaxSizePerMsg: 
maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, CheckQuorum: true, + PreVote: cfg.PreVote, + } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } } n = raft.StartNode(c, peers) raftStatusMu.Lock() raftStatus = n.Status raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) return id, n, s, w } @@ -426,11 +490,20 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap) - plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) + if cfg.Logger != nil { + cfg.Logger.Info( + "restarting local member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + } else { + plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) + } + cl := membership.NewCluster(cfg.Logger, "") + cl.SetID(id, cid) s := raft.NewMemoryStorage() if snapshot != nil { s.ApplySnapshot(*snapshot) @@ -445,13 +518,25 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member MaxSizePerMsg: maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, CheckQuorum: true, + PreVote: cfg.PreVote, + } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + var err error + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + 
log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } } n := raft.RestartNode(c) raftStatusMu.Lock() raftStatus = n.Status raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) return id, cl, n, s, w } @@ -460,33 +545,62 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap) // discard the previously uncommitted entries for i, ent := range ents { if ent.Index > st.Commit { - plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + if cfg.Logger != nil { + cfg.Logger.Info( + "discarding uncommitted WAL entries", + zap.Uint64("entry-index", ent.Index), + zap.Uint64("commit-index-from-wal", st.Commit), + zap.Int("number-of-discarded-entries", len(ents)-i), + ) + } else { + plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + } ents = ents[:i] break } } // force append the configuration change entries - toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) + toAppEnts := createConfigChangeEnts( + cfg.Logger, + getIDs(cfg.Logger, snapshot, ents), + uint64(id), + st.Term, + st.Commit, + ) ents = append(ents, toAppEnts...) 
// force commit newly appended entries err := w.Save(raftpb.HardState{}, toAppEnts) if err != nil { - plog.Fatalf("%v", err) + if cfg.Logger != nil { + cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err)) + } else { + plog.Fatalf("%v", err) + } } if len(ents) != 0 { st.Commit = ents[len(ents)-1].Index } - plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) + if cfg.Logger != nil { + cfg.Logger.Info( + "forcing restart member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + } else { + plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) + } + + cl := membership.NewCluster(cfg.Logger, "") + cl.SetID(id, cid) s := raft.NewMemoryStorage() if snapshot != nil { s.ApplySnapshot(*snapshot) @@ -500,7 +614,21 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types Storage: s, MaxSizePerMsg: maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + PreVote: cfg.PreVote, } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } + } + n := raft.RestartNode(c) raftStatus = n.Status return id, cl, n, s, w @@ -511,7 +639,7 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types // ID-related entry: // - ConfChangeAddNode, in which case the contained ID will be added into the set. // - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. 
-func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { +func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { ids := make(map[uint64]bool) if snap != nil { for _, id := range snap.Metadata.ConfState.Nodes { @@ -532,7 +660,11 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { case raftpb.ConfChangeUpdateNode: // do nothing default: - plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + if lg != nil { + lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) + } else { + plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + } } } sids := make(types.Uint64Slice, 0, len(ids)) @@ -548,7 +680,7 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { // `self` is _not_ removed, even if present in the set. // If `self` is not inside the given ids, it creates a Raft entry to add a // default member with the given `self`. -func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { +func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { ents := make([]raftpb.Entry, 0) next := index + 1 found := false @@ -577,7 +709,11 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf } ctx, err := json.Marshal(m) if err != nil { - plog.Panicf("marshal member should never fail: %v", err) + if lg != nil { + lg.Panic("failed to marshal member", zap.Error(err)) + } else { + plog.Panicf("marshal member should never fail: %v", err) + } } cc := &raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index ca75d3a2..c27716e4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -29,22 +29,20 @@ import ( "sync/atomic" "time" + 
"github.com/coreos/etcd/alarm" + "github.com/coreos/etcd/auth" + "github.com/coreos/etcd/compactor" + "github.com/coreos/etcd/discovery" "github.com/coreos/etcd/etcdserver/api" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/internal/alarm" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/compactor" - "github.com/coreos/etcd/internal/discovery" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/lease/leasehttp" - "github.com/coreos/etcd/internal/mvcc" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/raftsnap" - "github.com/coreos/etcd/internal/store" - "github.com/coreos/etcd/internal/version" + "github.com/coreos/etcd/etcdserver/v2store" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/lease/leasehttp" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -55,10 +53,14 @@ import ( "github.com/coreos/etcd/raft" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/raftsnap" + "github.com/coreos/etcd/version" "github.com/coreos/etcd/wal" "github.com/coreos/go-semver/semver" "github.com/coreos/pkg/capnslog" + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) const ( @@ -112,13 +114,15 @@ func init() { type Response struct { Term uint64 Index uint64 - Event *store.Event - Watcher store.Watcher + Event *v2store.Event + Watcher v2store.Watcher Err error } type ServerV2 interface { Server + Leader() types.ID + // Do takes a V2 request and attempts to fulfill it, returning a Response. 
Do(ctx context.Context, r pb.Request) (Response, error) stats.Stats @@ -127,16 +131,12 @@ type ServerV2 interface { type ServerV3 interface { Server - ID() types.ID - RaftTimer + RaftStatusGetter } func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } type Server interface { - // Leader returns the ID of the leader Server. - Leader() types.ID - // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. @@ -174,6 +174,9 @@ type EtcdServer struct { inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + term uint64 // must use atomic operations to access; keep 64-bit aligned. + lead uint64 // must use atomic operations to access; keep 64-bit aligned. + // consistIndex used to hold the offset of current executing entry // It is initialized to 0 before executing any entry. consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. @@ -182,6 +185,9 @@ type EtcdServer struct { readych chan struct{} Cfg ServerConfig + lgMu *sync.RWMutex + lg *zap.Logger + w wait.Wait readMu sync.RWMutex @@ -205,7 +211,7 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + v2store v2store.Store snapshotter *raftsnap.Snapshotter applyV2 ApplierV2 @@ -251,12 +257,14 @@ type EtcdServer struct { leadTimeMu sync.RWMutex leadElectedTime time.Time + + *AccessController } // NewServer creates a new EtcdServer from the supplied configuration. The // configuration is considered static for the lifetime of the EtcdServer. 
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { - st := store.New(StoreClusterPrefix, StoreKeysPrefix) + st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) var ( w *wal.WAL @@ -267,7 +275,17 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { ) if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + if cfg.Logger != nil { + cfg.Logger.Warn( + "exceeded recommended requet limit", + zap.Uint("max-request-bytes", cfg.MaxRequestBytes), + zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), + zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), + zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))), + ) + } else { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } } if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { @@ -277,9 +295,17 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { haveWAL := wal.Exist(cfg.WALDir()) if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - plog.Fatalf("create snapshot directory error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Fatal( + "failed to create snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } else { + plog.Fatalf("create snapshot directory error: %v", err) + } } - ss := raftsnap.New(cfg.SnapDir()) + ss := raftsnap.New(cfg.Logger, cfg.SnapDir()) bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) @@ -305,42 +331,44 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { if err = cfg.VerifyJoinExisting(); err != nil { return nil, err } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) if 
err != nil { return nil, err } - existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) + existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) if gerr != nil { return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) } - if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { + if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) } - if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { + if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) { return nil, fmt.Errorf("incompatible with current running cluster") } remotes = existingCluster.Members() - cl.SetID(existingCluster.ID()) + cl.SetID(types.ID(0), existingCluster.ID()) cl.SetStore(st) cl.SetBackend(be) cfg.Print() id, n, s, w = startNode(cfg, cl, nil) + cl.SetID(id, existingCluster.ID()) + case !haveWAL && cfg.NewCluster: if err = cfg.VerifyBootstrap(); err != nil { return nil, err } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) if err != nil { return nil, err } m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { + if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) { return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) } if cfg.ShouldDiscover() { var str string - str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + str, err = discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) if err != nil { return nil, &DiscoveryError{Op: "join", Err: err} } @@ -352,7 +380,7 
@@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { if checkDuplicateURL(urlsmap) { return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) } - if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { + if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil { return nil, err } } @@ -360,6 +388,8 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { cl.SetBackend(be) cfg.PrintWithInitial() id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) + cl.SetID(id, cl.ID()) + case haveWAL: if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { return nil, fmt.Errorf("cannot write to member directory: %v", err) @@ -370,7 +400,14 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { } if cfg.ShouldDiscover() { - plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir()) + if cfg.Logger != nil { + cfg.Logger.Warn( + "discovery token is ignored since cluster already initialized; valid logs are found", + zap.String("wal-dir", cfg.WALDir()), + ) + } else { + plog.Warningf("discovery token ignored since a cluster has already been initialized. 
Valid log found at %q", cfg.WALDir()) + } } snapshot, err = ss.Load() if err != nil && err != raftsnap.ErrNoSnapshot { @@ -378,19 +415,50 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { } if snapshot != nil { if err = st.Recovery(snapshot.Data); err != nil { - plog.Panicf("recovered store from snapshot error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to recover from snapshot") + } else { + plog.Panicf("recovered store from snapshot error: %v", err) + } } - plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + + if cfg.Logger != nil { + cfg.Logger.Info( + "recovered v2 store from snapshot", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), + ) + } else { + plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + } + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { - plog.Panicf("recovering backend from snapshot error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) + } else { + plog.Panicf("recovering backend from snapshot error: %v", err) + } + } + if cfg.Logger != nil { + s1, s2 := be.Size(), be.SizeInUse() + cfg.Logger.Info( + "recovered v3 backend from snapshot", + zap.Int64("backend-size-bytes", s1), + zap.String("backend-size", humanize.Bytes(uint64(s1))), + zap.Int64("backend-size-in-use-bytes", s2), + zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), + ) } } + cfg.Print() + if !cfg.ForceNewCluster { id, cl, n, s, w = restartNode(cfg, snapshot) } else { id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) } + cl.SetStore(st) cl.SetBackend(be) cl.Recover(api.UpdateCapability) @@ -398,6 +466,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { os.RemoveAll(bepath) return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) } + default: return nil, 
fmt.Errorf("unsupported bootstrap config") } @@ -413,11 +482,14 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { srv = &EtcdServer{ readych: make(chan struct{}), Cfg: cfg, + lgMu: new(sync.RWMutex), + lg: cfg.Logger, errorc: make(chan error, 1), - store: st, + v2store: st, snapshotter: ss, r: *newRaftNode( raftNodeConfig{ + lg: cfg.Logger, isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, Node: n, heartbeat: heartbeat, @@ -425,18 +497,19 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { storage: NewStorage(w, ss), }, ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - forceVersionC: make(chan struct{}), + id: id, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: prt, + reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), + forceVersionC: make(chan struct{}), + AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, } - srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} + srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} srv.be = be minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat @@ -444,16 +517,23 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. 
srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) - srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) + srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex) if beExist { kvindex := srv.kv.ConsistentIndex() // TODO: remove kvindex != 0 checking when we do not expect users to upgrade // etcd from pre-3.0 release. if snapshot != nil && kvindex < snapshot.Metadata.Index { if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) + return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index) + } + if cfg.Logger != nil { + cfg.Logger.Warn( + "consistent index was never saved", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + ) + } else { + plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } - plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } newSrv := srv // since srv == nil in defer if srv is returned as nil @@ -466,18 +546,22 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { }() srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) - tp, err := auth.NewTokenProvider(cfg.AuthToken, + tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken, func(index uint64) <-chan struct{} { return srv.applyWait.Wait(index) }, ) if err != nil { - plog.Errorf("failed to create token provider: %s", err) + if cfg.Logger != nil { + cfg.Logger.Warn("failed to create token provider", zap.Error(err)) + } else { + plog.Errorf("failed to create token provider: %s", err) + } return nil, err } - srv.authStore = auth.NewAuthStore(srv.be, tp) + srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost)) if num := cfg.AutoCompactionRetention; num != 0 { - srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv) + 
srv.compactor, err = compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv) if err != nil { return nil, err } @@ -491,6 +575,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { // TODO: move transport initialization near the definition of remote tr := &rafthttp.Transport{ + Logger: cfg.Logger, TLSInfo: cfg.PeerTLSInfo, DialTimeout: cfg.peerDialTimeout(), ID: id, @@ -521,12 +606,90 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { return srv, nil } +func (s *EtcdServer) getLogger() *zap.Logger { + s.lgMu.RLock() + l := s.lg + s.lgMu.RUnlock() + return l +} + +func (s *EtcdServer) adjustTicks() { + lg := s.getLogger() + clusterN := len(s.cluster.Members()) + + // single-node fresh start, or single-node recovers from snapshot + if clusterN == 1 { + ticks := s.Cfg.ElectionTicks - 1 + if lg != nil { + lg.Info( + "started as single-node; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + ) + } else { + plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks) + } + s.r.advanceTicks(ticks) + return + } + + if !s.Cfg.InitialElectionTickAdvance { + if lg != nil { + lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + } + return + } + if lg != nil { + lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + } + + // retry up to "rafthttp.ConnReadTimeout", which is 5-sec + // until peer connection reports; otherwise: + // 1. all connections failed, or + // 2. no active peers, or + // 3. 
restarted single-node with no snapshot + // then, do nothing, because advancing ticks would have no effect + waitTime := rafthttp.ConnReadTimeout + itv := 50 * time.Millisecond + for i := int64(0); i < int64(waitTime/itv); i++ { + select { + case <-time.After(itv): + case <-s.stopping: + return + } + + peerN := s.r.transport.ActivePeers() + if peerN > 1 { + // multi-node received peer connection reports + // adjust ticks, in case slow leader message receive + ticks := s.Cfg.ElectionTicks - 2 + + if lg != nil { + lg.Info( + "initialized peer connections; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + zap.Int("active-remote-members", peerN), + ) + } else { + plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN) + } + + s.r.advanceTicks(ticks) + return + } + } +} + // Start performs any initialization of the Server necessary for it to // begin serving requests. It must be called before Do or Process. // Start must be non-blocking; any long-running server functionality // should be implemented in goroutines. func (s *EtcdServer) Start() { s.start() + s.goAttach(func() { s.adjustTicks() }) s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) s.goAttach(s.purgeFile) s.goAttach(func() { monitorFileDescriptor(s.stopping) }) @@ -539,8 +702,13 @@ func (s *EtcdServer) Start() { // modify a server's fields after it has been sent to Start. // This function is just used for testing. 
func (s *EtcdServer) start() { + lg := s.getLogger() if s.Cfg.SnapCount == 0 { - plog.Infof("set snapshot count to default %d", DefaultSnapCount) + if lg != nil { + + } else { + plog.Infof("set snapshot count to default %d", DefaultSnapCount) + } s.Cfg.SnapCount = DefaultSnapCount } s.w = wait.New() @@ -552,9 +720,28 @@ func (s *EtcdServer) start() { s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() if s.ClusterVersion() != nil { - plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + if lg != nil { + lg.Info( + "starting etcd server", + zap.String("local-member-id", s.ID().String()), + zap.String("local-server-version", version.Version), + zap.String("cluster-id", s.Cluster().ID().String()), + zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), + ) + } else { + plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + } } else { - plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version) + if lg != nil { + lg.Info( + "starting etcd server", + zap.String("local-member-id", s.ID().String()), + zap.String("local-server-version", version.Version), + zap.String("cluster-version", "to_be_decided"), + ) + } else { + plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) + } } // TODO: if this is an empty log, writes all peer infos // into the first entry @@ -564,26 +751,38 @@ func (s *EtcdServer) start() { func (s *EtcdServer) purgeFile() { var dberrc, serrc, werrc <-chan error if s.Cfg.MaxSnapFiles > 0 { - dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) - serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) + dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) + serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) } if s.Cfg.MaxWALFiles > 0 { - werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) + werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) } + + lg := s.getLogger() select { case e := <-dberrc: - plog.Fatalf("failed to purge snap db file %v", e) + if lg != nil { + lg.Fatal("failed to purge snap db file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge snap db file %v", e) + } case e := <-serrc: - plog.Fatalf("failed to purge snap file %v", e) + if lg != nil { + lg.Fatal("failed to purge snap file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge snap file %v", e) + } case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) + if lg != nil { + lg.Fatal("failed to purge wal file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge wal file %v", e) + } case <-s.stopping: return } } -func (s *EtcdServer) ID() types.ID { return s.id } - func (s *EtcdServer) Cluster() api.Cluster { return s.cluster } func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } @@ -607,7 +806,15 @@ func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() // machine, 
respecting any timeout of the given context. func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { if s.cluster.IsIDRemoved(types.ID(m.From)) { - plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejected Raft message from removed member", + zap.String("local-member-id", s.ID().String()), + zap.String("removed-member-id", types.ID(m.From).String()), + ) + } else { + plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + } return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") } if m.Type == raftpb.MsgApp { @@ -637,14 +844,22 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { + getLead func() (lead uint64) + updateLead func(lead uint64) updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { + lg := s.getLogger() + sn, err := s.r.raftStorage.Snapshot() if err != nil { - plog.Panicf("get snapshot from raft storage error: %v", err) + if lg != nil { + lg.Panic("failed to get snapshot from Raft storage", zap.Error(err)) + } else { + plog.Panicf("get snapshot from raft storage error: %v", err) + } } // asynchronously accept apply packets, dispatch progress in-order @@ -666,6 +881,8 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ + getLead: func() (lead uint64) { return s.getLead() }, + updateLead: func(lead uint64) { s.setLead(lead) }, updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { @@ -770,20 +987,36 @@ func (s *EtcdServer) run() { lid := lease.ID s.goAttach(func() { ctx := s.authStore.WithRoot(s.ctx) - if _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}); lerr != nil { - plog.Warningf("failed to revoke %016x (%q)", 
lid, lerr.Error()) + _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + if lerr == nil { + leaseExpired.Inc() + } else { + if lg != nil { + lg.Warn( + "failed to revoke lease", + zap.String("lease-id", fmt.Sprintf("%016x", lid)), + zap.Error(lerr), + ) + } else { + plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error()) + } } - leaseExpired.Inc() + <-c }) } }) case err := <-s.errorc: - plog.Errorf("%s", err) - plog.Infof("the data-dir used by this member must be removed.") + if lg != nil { + lg.Warn("server error", zap.Error(err)) + lg.Warn("data-dir used by this member must be removed") + } else { + plog.Errorf("%s", err) + plog.Infof("the data-dir used by this member must be removed.") + } return case <-getSyncC(): - if s.store.HasTTLKeys() { + if s.v2store.HasTTLKeys() { s.sync(s.Cfg.ReqTimeout()) } case <-s.stop: @@ -794,16 +1027,11 @@ func (s *EtcdServer) run() { func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { s.applySnapshot(ep, apply) - st := time.Now() s.applyEntries(ep, apply) - d := time.Since(st) - entriesNum := len(apply.entries) - if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { - plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) - plog.Warningf("avoid queries with large range/delete range!") - } + proposalsApplied.Set(float64(ep.appliedi)) s.applyWait.Trigger(ep.appliedi) + // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. 
@@ -824,12 +1052,42 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { return } - plog.Infof("applying snapshot at index %d...", ep.snapi) - defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + lg := s.getLogger() + if lg != nil { + lg.Info( + "applying snapshot", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-snapshot-index", apply.snapshot.Metadata.Index), + ) + } else { + plog.Infof("applying snapshot at index %d...", ep.snapi) + } + defer func() { + if lg != nil { + lg.Info( + "applied snapshot", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-snapshot-index", apply.snapshot.Metadata.Index), + ) + } else { + plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + } + }() if apply.snapshot.Metadata.Index <= ep.appliedi { - plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", - apply.snapshot.Metadata.Index, ep.appliedi) + if lg != nil { + lg.Panic( + "unexpected leader snapshot from outdated index", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + ) + } else { + plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", + apply.snapshot.Metadata.Index, ep.appliedi) + } } // wait for raftNode to persist snapshot onto the disk @@ -837,25 +1095,51 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) if err != nil { - plog.Panic(err) + if lg != nil { + lg.Panic("failed to open snapshot backend", zap.Error(err)) + } else { + plog.Panic(err) + } } // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. 
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { - plog.Info("recovering lessor...") + if lg != nil { + lg.Info("restoring lease store") + } else { + plog.Info("recovering lessor...") + } + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) - plog.Info("finished recovering lessor") + + if lg != nil { + lg.Info("restored lease store") + } else { + plog.Info("finished recovering lessor") + } } - plog.Info("restoring mvcc store...") + if lg != nil { + lg.Info("restoring mvcc store") + } else { + plog.Info("restoring mvcc store...") + } if err := s.kv.Restore(newbe); err != nil { - plog.Panicf("restore KV error: %v", err) + if lg != nil { + lg.Panic("failed to restore mvcc store", zap.Error(err)) + } else { + plog.Panicf("restore KV error: %v", err) + } } - s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) - plog.Info("finished restoring mvcc store") + s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) + if lg != nil { + lg.Info("restored mvcc store") + } else { + plog.Info("finished restoring mvcc store") + } // Closing old backend might block until all the txns // on the backend are finished. 
@@ -863,53 +1147,126 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { s.bemu.Lock() oldbe := s.be go func() { - plog.Info("closing old backend...") - defer plog.Info("finished closing old backend") - + if lg != nil { + lg.Info("closing old backend file") + } else { + plog.Info("closing old backend...") + } + defer func() { + if lg != nil { + lg.Info("closed old backend file") + } else { + plog.Info("finished closing old backend") + } + }() if err := oldbe.Close(); err != nil { - plog.Panicf("close backend error: %v", err) + if lg != nil { + lg.Panic("failed to close old backend", zap.Error(err)) + } else { + plog.Panicf("close backend error: %v", err) + } } }() s.be = newbe s.bemu.Unlock() - plog.Info("recovering alarms...") - if err := s.restoreAlarms(); err != nil { - plog.Panicf("restore alarms error: %v", err) + if lg != nil { + lg.Info("restoring alarm store") + } else { + plog.Info("recovering alarms...") + } + + if err := s.restoreAlarms(); err != nil { + if lg != nil { + lg.Panic("failed to restore alarm store", zap.Error(err)) + } else { + plog.Panicf("restore alarms error: %v", err) + } + } + + if lg != nil { + lg.Info("restored alarm store") + } else { + plog.Info("finished recovering alarms") } - plog.Info("finished recovering alarms") if s.authStore != nil { - plog.Info("recovering auth store...") + if lg != nil { + lg.Info("restoring auth store") + } else { + plog.Info("recovering auth store...") + } + s.authStore.Recover(newbe) - plog.Info("finished recovering auth store") + + if lg != nil { + lg.Info("restored auth store") + } else { + plog.Info("finished recovering auth store") + } } - plog.Info("recovering store v2...") - if err := s.store.Recovery(apply.snapshot.Data); err != nil { - plog.Panicf("recovery store error: %v", err) + if lg != nil { + lg.Info("restoring v2 store") + } else { + plog.Info("recovering store v2...") + } + if err := s.v2store.Recovery(apply.snapshot.Data); err != nil { + if lg != nil { + 
lg.Panic("failed to restore v2 store", zap.Error(err)) + } else { + plog.Panicf("recovery store error: %v", err) + } + } + + if lg != nil { + lg.Info("restored v2 store") + } else { + plog.Info("finished recovering store v2") } - plog.Info("finished recovering store v2") s.cluster.SetBackend(s.be) - plog.Info("recovering cluster configuration...") - s.cluster.Recover(api.UpdateCapability) - plog.Info("finished recovering cluster configuration") - plog.Info("removing old peers from network...") + if lg != nil { + lg.Info("restoring cluster configuration") + } else { + plog.Info("recovering cluster configuration...") + } + + s.cluster.Recover(api.UpdateCapability) + + if lg != nil { + lg.Info("restored cluster configuration") + lg.Info("removing old peers from network") + } else { + plog.Info("finished recovering cluster configuration") + plog.Info("removing old peers from network...") + } + // recover raft transport s.r.transport.RemoveAllPeers() - plog.Info("finished removing old peers from network") - plog.Info("adding peers from new cluster configuration into network...") + if lg != nil { + lg.Info("removed old peers from network") + lg.Info("adding peers from new cluster configuration") + } else { + plog.Info("finished removing old peers from network") + plog.Info("adding peers from new cluster configuration into network...") + } + for _, m := range s.cluster.Members() { if m.ID == s.ID() { continue } s.r.transport.AddPeer(m.ID, m.PeerURLs) } - plog.Info("finished adding peers from new cluster configuration into network...") + + if lg != nil { + lg.Info("added peers from new cluster configuration") + } else { + plog.Info("finished adding peers from new cluster configuration into network...") + } ep.appliedt = apply.snapshot.Metadata.Term ep.appliedi = apply.snapshot.Metadata.Index @@ -923,7 +1280,15 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } firsti := apply.entries[0].Index if firsti > ep.appliedi+1 { - plog.Panicf("first index of 
committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + if lg := s.getLogger(); lg != nil { + lg.Panic( + "unexpected committed entry index", + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("first-committed-entry-index", firsti), + ) + } else { + plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + } } var ents []raftpb.Entry if ep.appliedi+1-firsti < uint64(len(apply.entries)) { @@ -943,7 +1308,18 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { return } - plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + if lg := s.getLogger(); lg != nil { + lg.Info( + "triggering snapshot", + zap.String("local-member-id", s.ID().String()), + zap.Uint64("local-member-applied-index", ep.appliedi), + zap.Uint64("local-member-snapshot-index", ep.snapi), + zap.Uint64("local-member-snapshot-count", s.Cfg.SnapCount), + ) + } else { + plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + } + s.snapshot(ep.appliedi, ep.confState) ep.snapi = ep.appliedi } @@ -961,7 +1337,17 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er now := time.Now() interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + if lg := s.getLogger(); lg != nil { + lg.Info( + "leadership transfer starting", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(lead).String()), + zap.String("transferee-member-id", types.ID(transferee).String()), + ) + } else { + plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + } + s.r.TransferLeadership(ctx, lead, transferee) for s.Lead() != transferee { select { @@ -972,20 +1358,45 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er } // TODO: drain all 
requests, or drop all messages to the old leader - - plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + if lg := s.getLogger(); lg != nil { + lg.Info( + "leadership transfer finished", + zap.String("local-member-id", s.ID().String()), + zap.String("old-leader-member-id", types.ID(lead).String()), + zap.String("new-leader-member-id", types.ID(transferee).String()), + zap.Duration("took", time.Since(now)), + ) + } else { + plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + } return nil } // TransferLeadership transfers the leader to the chosen transferee. func (s *EtcdServer) TransferLeadership() error { if !s.isLeader() { - plog.Printf("skipped leadership transfer for stopping non-leader member") + if lg := s.getLogger(); lg != nil { + lg.Info( + "skipped leadership transfer; local server is not leader", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + } else { + plog.Printf("skipped leadership transfer for stopping non-leader member") + } return nil } if !s.isMultiNode() { - plog.Printf("skipped leadership transfer for single member cluster") + if lg := s.getLogger(); lg != nil { + lg.Info( + "skipped leadership transfer; it's a single-node cluster", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + } else { + plog.Printf("skipped leadership transfer for single member cluster") + } return nil } @@ -1019,7 +1430,11 @@ func (s *EtcdServer) HardStop() { // Do and Process cannot be called after Stop has been invoked. 
func (s *EtcdServer) Stop() { if err := s.TransferLeadership(); err != nil { - plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + if lg := s.getLogger(); lg != nil { + lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err)) + } else { + plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + } } s.HardStop() } @@ -1046,14 +1461,14 @@ func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } func (s *EtcdServer) LeaderStats() []byte { - lead := atomic.LoadUint64(&s.r.lead) + lead := s.getLead() if lead != uint64(s.id) { return nil } return s.lstats.JSON() } -func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } +func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { @@ -1084,11 +1499,30 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]* if s.Cfg.StrictReconfigCheck { // by default StrictReconfigCheck is enabled; reject new members if unhealthy if !s.cluster.IsReadyToAddNewMember() { - plog.Warningf("not enough started members, rejecting member add %+v", memb) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member add request; not enough healthy members", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrNotEnoughStartedMembers), + ) + } else { + plog.Warningf("not enough started members, rejecting member add %+v", memb) + } return nil, ErrNotEnoughStartedMembers } + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { - plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member add request; local member has not 
been connected to all peers, reconfigure breaks active quorum", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrUnhealthy), + ) + } else { + plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) + } return nil, ErrUnhealthy } } @@ -1129,7 +1563,16 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { } if !s.cluster.IsReadyToRemoveMember(uint64(id)) { - plog.Warningf("not enough started members, rejecting remove member %s", id) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member remove request; not enough healthy members", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove-id", id.String()), + zap.Error(ErrNotEnoughStartedMembers), + ) + } else { + plog.Warningf("not enough started members, rejecting remove member %s", id) + } return ErrNotEnoughStartedMembers } @@ -1142,7 +1585,17 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { m := s.cluster.Members() active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) if (active - 1) < 1+((len(m)-1)/2) { - plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove", id.String()), + zap.Int("active-peers", active), + zap.Error(ErrUnhealthy), + ) + } else { + plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + } return ErrUnhealthy } @@ -1166,20 +1619,58 @@ func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ( return s.configure(ctx, cc) } -// Implement the RaftTimer interface +func (s *EtcdServer) setCommittedIndex(v uint64) { + atomic.StoreUint64(&s.committedIndex, v) +} -func (s 
*EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } +func (s *EtcdServer) getCommittedIndex() uint64 { + return atomic.LoadUint64(&s.committedIndex) +} -func (s *EtcdServer) AppliedIndex() uint64 { return atomic.LoadUint64(&s.r.appliedindex) } +func (s *EtcdServer) setAppliedIndex(v uint64) { + atomic.StoreUint64(&s.appliedIndex, v) +} -func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } +func (s *EtcdServer) getAppliedIndex() uint64 { + return atomic.LoadUint64(&s.appliedIndex) +} -// Lead is only for testing purposes. -// TODO: add Raft server interface to expose raft related info: -// Index, Term, Lead, Committed, Applied, LastIndex, etc. -func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } +func (s *EtcdServer) setTerm(v uint64) { + atomic.StoreUint64(&s.term, v) +} -func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } +func (s *EtcdServer) getTerm() uint64 { + return atomic.LoadUint64(&s.term) +} + +func (s *EtcdServer) setLead(v uint64) { + atomic.StoreUint64(&s.lead, v) +} + +func (s *EtcdServer) getLead() uint64 { + return atomic.LoadUint64(&s.lead) +} + +// RaftStatusGetter represents etcd server and Raft progress. 
+type RaftStatusGetter interface { + ID() types.ID + Leader() types.ID + CommittedIndex() uint64 + AppliedIndex() uint64 + Term() uint64 +} + +func (s *EtcdServer) ID() types.ID { return s.id } + +func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } + +func (s *EtcdServer) Lead() uint64 { return s.getLead() } + +func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() } + +func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() } + +func (s *EtcdServer) Term() uint64 { return s.getTerm() } type confChangeResponse struct { membs []*membership.Member @@ -1192,21 +1683,37 @@ type confChangeResponse struct { func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) + start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) return nil, err } + select { case x := <-ch: if x == nil { - plog.Panicf("configure trigger value should never be nil") + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to configure") + } else { + plog.Panicf("configure trigger value should never be nil") + } } resp := x.(*confChangeResponse) + if lg := s.getLogger(); lg != nil { + lg.Info( + "applied a configuration change through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("raft-conf-change", cc.Type.String()), + zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), + ) + } return resp.membs, resp.err + case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait return nil, s.parseProposeCtxErr(ctx.Err(), start) + case <-s.stopping: return nil, ErrStopped } @@ -1239,7 +1746,11 @@ func (s *EtcdServer) sync(timeout time.Duration) { func (s *EtcdServer) publish(timeout time.Duration) { b, err := json.Marshal(s.attributes) if err != nil { - plog.Panicf("json marshal error: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to marshal JSON", 
zap.Error(err)) + } else { + plog.Panicf("json marshal error: %v", err) + } return } req := pb.Request{ @@ -1255,13 +1766,47 @@ func (s *EtcdServer) publish(timeout time.Duration) { switch err { case nil: close(s.readych) - plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + if lg := s.getLogger(); lg != nil { + lg.Info( + "published local member to cluster through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.String("cluster-id", s.cluster.ID().String()), + zap.Duration("publish-timeout", timeout), + ) + } else { + plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + } return + case ErrStopped: - plog.Infof("aborting publish because server is stopped") + if lg := s.getLogger(); lg != nil { + lg.Warn( + "stopped publish because server is stopped", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + } else { + plog.Infof("aborting publish because server is stopped") + } return + default: - plog.Errorf("publish error: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "failed to publish local member to cluster through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + } else { + plog.Errorf("publish error: %v", err) + } } } } @@ -1269,7 +1814,20 @@ func (s *EtcdServer) publish(timeout time.Duration) { func (s *EtcdServer) sendMergedSnap(merged raftsnap.Message) { atomic.AddInt64(&s.inflightSnapshots, 1) + lg := s.getLogger() + fields := []zap.Field{ + zap.String("from", s.ID().String()), + zap.String("to", types.ID(merged.To).String()), + zap.Int64("bytes", 
merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + } + + now := time.Now() s.r.transport.SendSnapshot(merged) + if lg != nil { + lg.Info("sending merged snapshot", fields...) + } + s.goAttach(func() { select { case ok := <-merged.CloseNotify(): @@ -1283,8 +1841,17 @@ func (s *EtcdServer) sendMergedSnap(merged raftsnap.Message) { case <-s.stopping: } } + atomic.AddInt64(&s.inflightSnapshots, -1) + + if lg != nil { + lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...) + } + case <-s.stopping: + if lg != nil { + lg.Warn("canceled sending merged snapshot; server stopping", fields...) + } return } }) @@ -1293,12 +1860,18 @@ func (s *EtcdServer) sendMergedSnap(merged raftsnap.Message) { // apply takes entries received from Raft (after it has been committed) and // applies them to the current state of the EtcdServer. // The given entries should not be empty. -func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { +func (s *EtcdServer) apply( + es []raftpb.Entry, + confState *raftpb.ConfState, +) (appliedt uint64, appliedi uint64, shouldStop bool) { for i := range es { e := es[i] switch e.Type { case raftpb.EntryNormal: s.applyEntryNormal(&e) + s.setAppliedIndex(e.Index) + s.setTerm(e.Term) + case raftpb.EntryConfChange: // set the consistent index of current executing entry if e.Index > s.consistIndex.ConsistentIndex() { @@ -1308,15 +1881,21 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl pbutil.MustUnmarshal(&cc, e.Data) removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) + s.setTerm(e.Term) shouldStop = shouldStop || removedSelf s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + default: - plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + if lg := s.getLogger(); lg != nil { + lg.Panic( + "unknown entry type; must be 
either EntryNormal or EntryConfChange", + zap.String("type", e.Type.String()), + ) + } else { + plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + } } - atomic.StoreUint64(&s.r.index, e.Index) - atomic.StoreUint64(&s.r.term, e.Term) - appliedt = e.Term - appliedi = e.Index + appliedi, appliedt = e.Index, e.Term } return appliedt, appliedi, shouldStop } @@ -1329,7 +1908,6 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { s.consistIndex.setConsistentIndex(e.Index) shouldApplyV3 = true } - defer s.setAppliedIndex(e.Index) // raft state machine may generate noop entry when leader confirmation. // skip it in advance to avoid some potential bug in the future @@ -1388,7 +1966,17 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - plog.Errorf("applying raft message exceeded backend quota") + if lg := s.getLogger(); lg != nil { + lg.Warn( + "message exceeded backend quota; raising alarm", + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Error(ar.err), + ) + } else { + plog.Errorf("applying raft message exceeded backend quota") + } + s.goAttach(func() { a := &pb.AlarmRequest{ MemberID: uint64(s.ID()), @@ -1408,20 +1996,35 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con s.r.ApplyConfChange(cc) return false, err } + + lg := s.getLogger() *confState = *s.r.ApplyConfChange(cc) switch cc.Type { case raftpb.ConfChangeAddNode: m := new(membership.Member) if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) + if lg != nil { + lg.Panic("failed to unmarshal member", zap.Error(err)) + } else { + plog.Panicf("unmarshal member should never fail: %v", err) + } } if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") + if lg != nil { + lg.Panic( + "got different member ID", + 
zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", m.ID.String()), + ) + } else { + plog.Panicf("nodeID should always be equal to member ID") + } } s.cluster.AddMember(m) if m.ID != s.id { s.r.transport.AddPeer(m.ID, m.PeerURLs) } + case raftpb.ConfChangeRemoveNode: id := types.ID(cc.NodeID) s.cluster.RemoveMember(id) @@ -1429,13 +2032,26 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con return true, nil } s.r.transport.RemovePeer(id) + case raftpb.ConfChangeUpdateNode: m := new(membership.Member) if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) + if lg != nil { + lg.Panic("failed to unmarshal member", zap.Error(err)) + } else { + plog.Panicf("unmarshal member should never fail: %v", err) + } } if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") + if lg != nil { + lg.Panic( + "got different member ID", + zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", m.ID.String()), + ) + } else { + plog.Panicf("nodeID should always be equal to member ID") + } } s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) if m.ID != s.id { @@ -1447,7 +2063,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con // TODO: non-blocking snapshot func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.store.Clone() + clone := s.v2store.Clone() // commit kv to write metadata (for example: consistent index) to disk. // KV().commit() updates the consistent index in backend. 
// All operations that update consistent index must be called sequentially @@ -1457,11 +2073,17 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { s.KV().Commit() s.goAttach(func() { + lg := s.getLogger() + d, err := clone.SaveNoCopy() // TODO: current store will never fail to do a snapshot // what should we do if the store might fail? if err != nil { - plog.Panicf("store save should never fail: %v", err) + if lg != nil { + lg.Panic("failed to save v2 store", zap.Error(err)) + } else { + plog.Panicf("store save should never fail: %v", err) + } } snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) if err != nil { @@ -1470,14 +2092,29 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { if err == raft.ErrSnapOutOfDate { return } - plog.Panicf("unexpected create snapshot error %v", err) + if lg != nil { + lg.Panic("failed to create snapshot", zap.Error(err)) + } else { + plog.Panicf("unexpected create snapshot error %v", err) + } } // SaveSnap saves the snapshot and releases the locked wal files // to the snapshot index. if err = s.r.storage.SaveSnap(snap); err != nil { - plog.Fatalf("save snapshot error: %v", err) + if lg != nil { + lg.Panic("failed to save snapshot", zap.Error(err)) + } else { + plog.Fatalf("save snapshot error: %v", err) + } + } + if lg != nil { + lg.Info( + "saved snapshot", + zap.Uint64("snapshot-index", snap.Metadata.Index), + ) + } else { + plog.Infof("saved snapshot at index %d", snap.Metadata.Index) } - plog.Infof("saved snapshot at index %d", snap.Metadata.Index) // When sending a snapshot, etcd will pause compaction. // After receives a snapshot, the slow follower needs to get all the entries right after @@ -1485,7 +2122,11 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { // the snapshot sent might already be compacted. It happens when the snapshot takes long time // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. 
if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - plog.Infof("skip compaction since there is an inflight snapshot") + if lg != nil { + lg.Info("skip compaction since there is an inflight snapshot") + } else { + plog.Infof("skip compaction since there is an inflight snapshot") + } return } @@ -1501,9 +2142,20 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { if err == raft.ErrCompacted { return } - plog.Panicf("unexpected compaction error %v", err) + if lg != nil { + lg.Panic("failed to compact", zap.Error(err)) + } else { + plog.Panicf("unexpected compaction error %v", err) + } + } + if lg != nil { + lg.Info( + "compacted Raft logs", + zap.Uint64("compact-index", compacti), + ) + } else { + plog.Infof("compacted raft log at %d", compacti) } - plog.Infof("compacted raft log at %d", compacti) }) } @@ -1551,7 +2203,7 @@ func (s *EtcdServer) monitorVersions() { continue } - v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) + v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt)) if v != nil { // only keep major.minor version for comparison v = &semver.Version{ @@ -1581,27 +2233,60 @@ func (s *EtcdServer) monitorVersions() { } func (s *EtcdServer) updateClusterVersion(ver string) { + lg := s.getLogger() + if s.cluster.Version() == nil { - plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + if lg != nil { + lg.Info( + "setting up initial cluster version", + zap.String("cluster-version", version.Cluster(ver)), + ) + } else { + plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + } } else { - plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + if lg != nil { + lg.Info( + "updating cluster version", + zap.String("from", version.Cluster(s.cluster.Version().String())), + zap.String("to", version.Cluster(ver)), + ) + } else { + plog.Infof("updating the 
cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + } } + req := pb.Request{ Method: "PUT", Path: membership.StoreClusterVersionKey(), Val: ver, } + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) _, err := s.Do(ctx, req) cancel() + switch err { case nil: + if lg != nil { + lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) + } return + case ErrStopped: - plog.Infof("aborting update cluster version because server is stopped") + if lg != nil { + lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) + } else { + plog.Infof("aborting update cluster version because server is stopped") + } return + default: - plog.Errorf("error updating cluster version (%v)", err) + if lg != nil { + lg.Warn("failed to update cluster version", zap.Error(err)) + } else { + plog.Errorf("error updating cluster version (%v)", err) + } } } @@ -1609,6 +2294,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { switch err { case context.Canceled: return ErrCanceled + case context.DeadlineExceeded: s.leadTimeMu.RLock() curLeadElected := s.leadElectedTime @@ -1617,8 +2303,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail } - - lead := types.ID(atomic.LoadUint64(&s.r.lead)) + lead := types.ID(s.getLead()) switch lead { case types.ID(raft.None): // TODO: return error to specify it happens because the cluster does not have leader now @@ -1631,8 +2316,8 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { return ErrTimeoutDueToConnectionLost } } - return ErrTimeout + default: return err } @@ -1663,23 +2348,6 @@ func (s *EtcdServer) restoreAlarms() error { return nil } -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) 
setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) - atomic.StoreUint64(&s.r.appliedindex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - // goAttach creates a goroutine on a given function and tracks it using // the etcdserver waitgroup. func (s *EtcdServer) goAttach(f func()) { @@ -1687,7 +2355,11 @@ func (s *EtcdServer) goAttach(f func()) { defer s.wgMu.RUnlock() select { case <-s.stopping: - plog.Warning("server has stopped (skipping goAttach)") + if lg := s.getLogger(); lg != nil { + lg.Warn("server has stopped; skipping goAttach") + } else { + plog.Warning("server has stopped (skipping goAttach)") + } return default: } diff --git a/vendor/github.com/coreos/etcd/etcdserver/server_access_control.go b/vendor/github.com/coreos/etcd/etcdserver/server_access_control.go new file mode 100644 index 00000000..09e2255c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/server_access_control.go @@ -0,0 +1,65 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import "sync" + +// AccessController controls etcd server HTTP request access. 
+type AccessController struct { + corsMu sync.RWMutex + CORS map[string]struct{} + hostWhitelistMu sync.RWMutex + HostWhitelist map[string]struct{} +} + +// NewAccessController returns a new "AccessController" with default "*" values. +func NewAccessController() *AccessController { + return &AccessController{ + CORS: map[string]struct{}{"*": {}}, + HostWhitelist: map[string]struct{}{"*": {}}, + } +} + +// OriginAllowed determines whether the server will allow a given CORS origin. +// If CORS is empty, allow all. +func (ac *AccessController) OriginAllowed(origin string) bool { + ac.corsMu.RLock() + defer ac.corsMu.RUnlock() + if len(ac.CORS) == 0 { // allow all + return true + } + _, ok := ac.CORS["*"] + if ok { + return true + } + _, ok = ac.CORS[origin] + return ok +} + +// IsHostWhitelisted returns true if the host is whitelisted. +// If whitelist is empty, allow all. +func (ac *AccessController) IsHostWhitelisted(host string) bool { + ac.hostWhitelistMu.RLock() + defer ac.hostWhitelistMu.RUnlock() + if len(ac.HostWhitelist) == 0 { // allow all + return true + } + _, ok := ac.HostWhitelist["*"] + if ok { + return true + } + _, ok = ac.HostWhitelist[host] + return ok +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 20894e81..425550f7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -17,9 +17,12 @@ package etcdserver import ( "io" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/raftsnap" + "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/raftsnap" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) // createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), @@ -27,17 +30,21 @@ import ( // as ReadCloser. 
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) raftsnap.Message { // get a snapshot of v2 store as []byte - clone := s.store.Clone() + clone := s.v2store.Clone() d, err := clone.SaveNoCopy() if err != nil { - plog.Panicf("store save should never fail: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to save v2 store data", zap.Error(err)) + } else { + plog.Panicf("store save should never fail: %v", err) + } } // commit kv to write metadata(for example: consistent index). s.KV().Commit() dbsnap := s.be.Snapshot() // get a snapshot of v3 KV as readCloser - rc := newSnapshotReaderCloser(dbsnap) + rc := newSnapshotReaderCloser(s.getLogger(), dbsnap) // put the []byte snapshot of store into raft snapshot and return the merged snapshot with // KV readCloser snapshot. @@ -54,19 +61,39 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi return *raftsnap.NewMessage(m, rc, dbsnap.Size()) } -func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { +func newSnapshotReaderCloser(lg *zap.Logger, snapshot backend.Snapshot) io.ReadCloser { pr, pw := io.Pipe() go func() { n, err := snapshot.WriteTo(pw) if err == nil { - plog.Infof("wrote database snapshot out [total bytes: %d]", n) + if lg != nil { + lg.Info( + "sent database snapshot to writer", + zap.Int64("bytes", n), + zap.String("size", humanize.Bytes(uint64(n))), + ) + } else { + plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } } else { - plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + if lg != nil { + lg.Warn( + "failed to send database snapshot to writer", + zap.String("size", humanize.Bytes(uint64(n))), + zap.Error(err), + ) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + } } pw.CloseWithError(err) err = snapshot.Close() if err != nil { - plog.Panicf("failed to close database 
snapshot: %v", err) + if lg != nil { + lg.Panic("failed to close database snapshot", zap.Error(err)) + } else { + plog.Panicf("failed to close database snapshot: %v", err) + } } }() return pr diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go index b567c518..876f897d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -18,12 +18,14 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/raftsnap" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/raftsnap" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" + + "go.uber.org/zap" ) type Storage interface { @@ -63,7 +65,7 @@ func (st *storage) SaveSnap(snap raftpb.Snapshot) error { return st.WAL.ReleaseLockTo(snap.Metadata.Index) } -func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { +func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { var ( err error wmetadata []byte @@ -71,19 +73,35 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, repaired := false for { - if w, err = wal.Open(waldir, snap); err != nil { - plog.Fatalf("open wal error: %v", err) + if w, err = wal.Open(lg, waldir, snap); err != nil { + if lg != nil { + lg.Fatal("failed to open WAL", zap.Error(err)) + } else { + plog.Fatalf("open wal error: %v", err) + } } if wmetadata, st, ents, err = w.ReadAll(); err != nil { w.Close() // we can only repair ErrUnexpectedEOF and we never repair twice. 
if repaired || err != io.ErrUnexpectedEOF { - plog.Fatalf("read wal error (%v) and cannot be repaired", err) + if lg != nil { + lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err)) + } else { + plog.Fatalf("read wal error (%v) and cannot be repaired", err) + } } - if !wal.Repair(waldir) { - plog.Fatalf("WAL error (%v) cannot be repaired", err) + if !wal.Repair(lg, waldir) { + if lg != nil { + lg.Fatal("failed to repair WAL", zap.Error(err)) + } else { + plog.Fatalf("WAL error (%v) cannot be repaired", err) + } } else { - plog.Infof("repaired WAL error (%v)", err) + if lg != nil { + lg.Info("repaired WAL", zap.Error(err)) + } else { + plog.Infof("repaired WAL error (%v)", err) + } repaired = true } continue diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go index e3896ffc..ad0632da 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -15,11 +15,14 @@ package etcdserver import ( + "fmt" "time" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + + "go.uber.org/zap" ) // isConnectedToQuorumSince checks whether the local member is connected to the @@ -95,3 +98,29 @@ func (nc *notifier) notify(err error) { nc.err = err close(nc.c) } + +func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer) { + warnOfExpensiveGenericRequest(lg, now, stringer, "") +} + +func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer) { + warnOfExpensiveGenericRequest(lg, now, stringer, "read-only range ") +} + +func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer, prefix string) { + // TODO: add metrics + d := time.Since(now) + if d > warnApplyDuration { + if lg != nil { + lg.Warn( + "request took too long", + zap.Duration("took", d), + zap.Duration("expected-duration", warnApplyDuration), + 
zap.String("prefix", prefix), + zap.String("request", stringer.String()), + ) + } else { + plog.Warningf("%srequest %q took too long (%v) to execute", prefix, stringer.String(), d) + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go index 986959fe..1be5650f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go @@ -19,7 +19,7 @@ import ( "time" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/internal/store" + "github.com/coreos/etcd/etcdserver/v2store" ) type RequestV2 pb.Request @@ -39,11 +39,11 @@ type reqV2HandlerEtcdServer struct { } type reqV2HandlerStore struct { - store store.Store + store v2store.Store applier ApplierV2 } -func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler { +func NewStoreRequestV2Handler(s v2store.Store, applier ApplierV2) RequestV2Handler { return &reqV2HandlerStore{s, applier} } @@ -122,14 +122,14 @@ func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { r.ID = s.reqIDGen.Next() h := &reqV2HandlerEtcdServer{ reqV2HandlerStore: reqV2HandlerStore{ - store: s.store, + store: s.v2store, applier: s.applyV2, }, s: s, } rp := &r resp, err := ((*RequestV2)(rp)).Handle(ctx, h) - resp.Term, resp.Index = s.Term(), s.Index() + resp.Term, resp.Index = s.Term(), s.CommittedIndex() return resp, err } @@ -158,3 +158,8 @@ func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Respons } return Response{}, ErrUnknownMethod } + +func (r *RequestV2) String() string { + rpb := pb.Request(*r) + return rpb.String() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index d506b1ab..a02c7dc1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -18,17 +18,19 @@ 
import ( "bytes" "context" "encoding/binary" + "fmt" "time" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/internal/auth" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/lease/leasehttp" - "github.com/coreos/etcd/internal/mvcc" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/lease/leasehttp" + "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/raft" "github.com/gogo/protobuf/proto" + "go.uber.org/zap" ) const ( @@ -84,6 +86,8 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + defer warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), time.Now(), r) + if !r.Serializable { err := s.linearizableReadNotify(ctx) if err != nil { @@ -95,6 +99,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe chk := func(ai *auth.AuthInfo) error { return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr @@ -131,12 +136,16 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse chk := func(ai *auth.AuthInfo) error { return checkTxnAuth(s.authStore, ai, r) } + + defer warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), time.Now(), r) + get := func() { resp, err = s.applyV3Base.Txn(r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err @@ -351,12 +360,22 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest return nil, err } + lg := s.getLogger() + var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { if err != auth.ErrAuthNotEnabled { - 
plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if lg != nil { + lg.Warn( + "invalid authentication was requested", + zap.String("user", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } } return nil, err } @@ -379,7 +398,12 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest if checkedRevision == s.AuthStore().Revision() { break } - plog.Infof("revision when password checked is obsolete, retrying") + + if lg != nil { + lg.Info("revision when password checked became stale; retrying") + } else { + plog.Infof("revision when password checked is obsolete, retrying") + } } return resp.(*pb.AuthenticateResponse), nil @@ -575,7 +599,12 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In defer cancel() start := time.Now() - s.r.Propose(cctx, data) + err = s.r.Propose(cctx, data) + if err != nil { + proposalsFailed.Inc() + s.w.Trigger(id, nil) // GC wait + return nil, err + } proposalsPending.Inc() defer proposalsPending.Dec() @@ -614,13 +643,18 @@ func (s *EtcdServer) linearizableReadLoop() { s.readNotifier = nextnr s.readMu.Unlock() + lg := s.getLogger() cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) if err := s.r.ReadIndex(cctx, ctx); err != nil { cancel() if err == raft.ErrStopped { return } - plog.Errorf("failed to get read index from raft: %v", err) + if lg != nil { + lg.Warn("failed to get read index from Raft", zap.Error(err)) + } else { + plog.Errorf("failed to get read index from raft: %v", err) + } nr.notify(err) continue } @@ -637,10 +671,22 @@ func (s *EtcdServer) linearizableReadLoop() { if !done { // a previous request might time out. now we should ignore the response of it and // continue waiting for the response of the current requests. 
- plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) + if lg != nil { + lg.Warn( + "ignored out-of-date read index response", + zap.String("ctx-expected", fmt.Sprintf("%+v", string(rs.RequestCtx))), + zap.String("ctx-got", fmt.Sprintf("%+v", string(ctx))), + ) + } else { + plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) + } } case <-time.After(s.Cfg.ReqTimeout()): - plog.Warningf("timed out waiting for read index response") + if lg != nil { + lg.Warn("timed out waiting for read index response", zap.Duration("timeout", s.Cfg.ReqTimeout())) + } else { + plog.Warningf("timed out waiting for read index response") + } nr.notify(ErrTimeout) timeout = true case <-s.stopping: diff --git a/vendor/github.com/coreos/etcd/internal/auth/jwt.go b/vendor/github.com/coreos/etcd/internal/auth/jwt.go deleted file mode 100644 index 99b2d6b5..00000000 --- a/vendor/github.com/coreos/etcd/internal/auth/jwt.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "context" - "crypto/rsa" - "io/ioutil" - - jwt "github.com/dgrijalva/jwt-go" -) - -type tokenJWT struct { - signMethod string - signKey *rsa.PrivateKey - verifyKey *rsa.PublicKey -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - return t.verifyKey, nil - }) - - switch err.(type) { - case nil: - if !parsed.Valid { - plog.Warningf("invalid jwt token: %s", token) - return nil, false - } - - claims := parsed.Claims.(jwt.MapClaims) - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - default: - plog.Warningf("failed to parse jwt token: %s", err) - return nil, false - } - - return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. 
- tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), - jwt.MapClaims{ - "username": username, - "revision": revision, - }) - - token, err := tk.SignedString(t.signKey) - if err != nil { - plog.Debugf("failed to sign jwt token: %s", err) - return "", err - } - - plog.Debugf("jwt token: %s", token) - - return token, err -} - -func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { - for k, v := range opts { - switch k { - case "sign-method": - jwtSignMethod = v - case "pub-key": - jwtPubKeyPath = v - case "priv-key": - jwtPrivKeyPath = v - default: - plog.Errorf("unknown token specific option: %s", k) - return "", "", "", ErrInvalidAuthOpts - } - } - if len(jwtSignMethod) == 0 { - return "", "", "", ErrInvalidAuthOpts - } - return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil -} - -func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { - jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - t := &tokenJWT{} - - t.signMethod = jwtSignMethod - - verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) - if err != nil { - plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) - return nil, err - } - t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - if err != nil { - plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) - return nil, err - } - - signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) - if err != nil { - plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) - return nil, err - } - t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) - return nil, err - } - - return t, nil -} diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go similarity index 100% rename from 
vendor/github.com/coreos/etcd/internal/mvcc/doc.go rename to vendor/github.com/coreos/etcd/mvcc/doc.go diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go similarity index 81% rename from vendor/github.com/coreos/etcd/internal/mvcc/index.go rename to vendor/github.com/coreos/etcd/mvcc/index.go index b27a9e54..f8cc6df8 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/index.go +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -19,6 +19,7 @@ import ( "sync" "github.com/google/btree" + "go.uber.org/zap" ) type index interface { @@ -39,11 +40,13 @@ type index interface { type treeIndex struct { sync.RWMutex tree *btree.BTree + lg *zap.Logger } -func newTreeIndex() index { +func newTreeIndex(lg *zap.Logger) index { return &treeIndex{ tree: btree.New(32), + lg: lg, } } @@ -54,12 +57,12 @@ func (ti *treeIndex) Put(key []byte, rev revision) { defer ti.Unlock() item := ti.tree.Get(keyi) if item == nil { - keyi.put(rev.main, rev.sub) + keyi.put(ti.lg, rev.main, rev.sub) ti.tree.ReplaceOrInsert(keyi) return } okeyi := item.(*keyIndex) - okeyi.put(rev.main, rev.sub) + okeyi.put(ti.lg, rev.main, rev.sub) } func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { @@ -69,7 +72,7 @@ func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, v if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - return keyi.get(atRev) + return keyi.get(ti.lg, atRev) } func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { @@ -109,7 +112,7 @@ func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) { return []revision{rev} } ti.visit(key, end, func(ki *keyIndex) { - if rev, _, _, err := ki.get(atRev); err == nil { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { revs = append(revs, rev) } }) @@ -125,7 +128,7 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, 
revs [] return [][]byte{key}, []revision{rev} } ti.visit(key, end, func(ki *keyIndex) { - if rev, _, _, err := ki.get(atRev); err == nil { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { revs = append(revs, rev) keys = append(keys, ki.key) } @@ -144,7 +147,7 @@ func (ti *treeIndex) Tombstone(key []byte, rev revision) error { } ki := item.(*keyIndex) - return ki.tombstone(rev.main, rev.sub) + return ki.tombstone(ti.lg, rev.main, rev.sub) } // RangeSince returns all revisions from key(including) to end(excluding) @@ -162,7 +165,7 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { return nil } keyi = item.(*keyIndex) - return keyi.since(rev) + return keyi.since(ti.lg, rev) } endi := &keyIndex{key: end} @@ -172,7 +175,7 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { return false } curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(rev)...) + revs = append(revs, curKeyi.since(ti.lg, rev)...) return true }) sort.Sort(revisions(revs)) @@ -182,19 +185,34 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { available := make(map[revision]struct{}) - var emptyki []*keyIndex - plog.Printf("store.index: compact %d", rev) - // TODO: do not hold the lock for long time? - // This is probably OK. Compacting 10M keys takes O(10ms). 
- ti.Lock() - defer ti.Unlock() - ti.tree.Ascend(compactIndex(rev, available, &emptyki)) - for _, ki := range emptyki { - item := ti.tree.Delete(ki) - if item == nil { - plog.Panic("store.index: unexpected delete failure during compaction") - } + if ti.lg != nil { + ti.lg.Info("compact tree index", zap.Int64("revision", rev)) + } else { + plog.Printf("store.index: compact %d", rev) } + ti.Lock() + clone := ti.tree.Clone() + ti.Unlock() + + clone.Ascend(func(item btree.Item) bool { + keyi := item.(*keyIndex) + //Lock is needed here to prevent modification to the keyIndex while + //compaction is going on or revision added to empty before deletion + ti.Lock() + keyi.compact(ti.lg, rev, available) + if keyi.isEmpty() { + item := ti.tree.Delete(keyi) + if item == nil { + if ti.lg != nil { + ti.lg.Panic("failed to delete during compaction") + } else { + plog.Panic("store.index: unexpected delete failure during compaction") + } + } + } + ti.Unlock() + return true + }) return available } @@ -211,17 +229,6 @@ func (ti *treeIndex) Keep(rev int64) map[revision]struct{} { return available } -func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { - return func(i btree.Item) bool { - keyi := i.(*keyIndex) - keyi.compact(rev, available) - if keyi.isEmpty() { - *emptyki = append(*emptyki, keyi) - } - return true - } -} - func (ti *treeIndex) Equal(bi index) bool { b := bi.(*treeIndex) diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go similarity index 80% rename from vendor/github.com/coreos/etcd/internal/mvcc/key_index.go rename to vendor/github.com/coreos/etcd/mvcc/key_index.go index 805922bf..2b0844e3 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/key_index.go +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/google/btree" + "go.uber.org/zap" ) var ( @@ -73,11 +74,21 @@ type keyIndex struct { } // 
put puts a revision to the keyIndex. -func (ki *keyIndex) put(main int64, sub int64) { +func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { rev := revision{main: main, sub: sub} if !rev.GreaterThan(ki.modified) { - plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + if lg != nil { + lg.Panic( + "'put' with an unexpected smaller revision", + zap.Int64("given-revision-main", rev.main), + zap.Int64("given-revision-sub", rev.sub), + zap.Int64("modified-revision-main", ki.modified.main), + zap.Int64("modified-revision-sub", ki.modified.sub), + ) + } else { + plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + } } if len(ki.generations) == 0 { ki.generations = append(ki.generations, generation{}) @@ -92,9 +103,16 @@ func (ki *keyIndex) put(main int64, sub int64) { ki.modified = rev } -func (ki *keyIndex) restore(created, modified revision, ver int64) { +func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) { if len(ki.generations) != 0 { - plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + if lg != nil { + lg.Panic( + "'restore' got an unexpected non-empty generations", + zap.Int("generations-size", len(ki.generations)), + ) + } else { + plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + } } ki.modified = modified @@ -106,14 +124,21 @@ func (ki *keyIndex) restore(created, modified revision, ver int64) { // tombstone puts a revision, pointing to a tombstone, to the keyIndex. // It also creates a new empty generation in the keyIndex. // It returns ErrRevisionNotFound when tombstone on an empty generation. 
-func (ki *keyIndex) tombstone(main int64, sub int64) error { +func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'tombstone' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + } } if ki.generations[len(ki.generations)-1].isEmpty() { return ErrRevisionNotFound } - ki.put(main, sub) + ki.put(lg, main, sub) ki.generations = append(ki.generations, generation{}) keysGauge.Dec() return nil @@ -121,9 +146,16 @@ func (ki *keyIndex) tombstone(main int64, sub int64) error { // get gets the modified, created revision and version of the key that satisfies the given atRev. // Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { +func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'get' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } } g := ki.findGeneration(atRev) if g.isEmpty() { @@ -141,9 +173,16 @@ func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err // since returns revisions since the given rev. Only the revision with the // largest sub revision will be returned if multiple revisions have the same // main revision. 
-func (ki *keyIndex) since(rev int64) []revision { +func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'since' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } } since := revision{rev, 0} var gi int @@ -182,9 +221,16 @@ func (ki *keyIndex) since(rev int64) []revision { // revision than the given atRev except the largest one (If the largest one is // a tombstone, it will not be kept). // If a generation becomes empty during compaction, it will be removed. -func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { +func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'compact' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) + } } genIdx, revIndex := ki.doCompact(atRev, available) diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go similarity index 97% rename from vendor/github.com/coreos/etcd/internal/mvcc/kv.go rename to vendor/github.com/coreos/etcd/mvcc/kv.go index e8cb1dec..2dad3ad8 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/kv.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -15,9 +15,9 @@ package mvcc import ( - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" ) type RangeOptions struct { diff --git 
a/vendor/github.com/coreos/etcd/internal/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go similarity index 96% rename from vendor/github.com/coreos/etcd/internal/mvcc/kv_view.go rename to vendor/github.com/coreos/etcd/mvcc/kv_view.go index 8269a727..1e869c25 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/kv_view.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go @@ -14,7 +14,7 @@ package mvcc -import "github.com/coreos/etcd/internal/lease" +import "github.com/coreos/etcd/lease" type readView struct{ kv KV } diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go similarity index 82% rename from vendor/github.com/coreos/etcd/internal/mvcc/kvstore.go rename to vendor/github.com/coreos/etcd/mvcc/kvstore.go index 54bb2047..592f46d2 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -18,17 +18,20 @@ import ( "context" "encoding/binary" "errors" + "fmt" "hash/crc32" "math" "sync" "sync/atomic" "time" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/schedule" + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) var ( @@ -99,15 +102,17 @@ type store struct { fifoSched schedule.Scheduler stopc chan struct{} + + lg *zap.Logger } // NewStore returns a new store. It is useful to create a store inside // mvcc pkg. It should only be used for testing externally. 
-func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { +func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { s := &store{ b: b, ig: ig, - kvindex: newTreeIndex(), + kvindex: newTreeIndex(lg), le: le, @@ -118,6 +123,8 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), + + lg: lg, } s.ReadView = &readView{s} s.WriteView = &writeView{s} @@ -211,17 +218,18 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.mu.Lock() - defer s.mu.Unlock() s.revMu.Lock() - defer s.revMu.Unlock() - if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) + s.mu.Unlock() + s.revMu.Unlock() return ch, ErrCompacted } if rev > s.currentRev { + s.mu.Unlock() + s.revMu.Unlock() return nil, ErrFutureRev } @@ -239,6 +247,8 @@ func (s *store) Compact(rev int64) (<-chan struct{}, error) { // ensure that desired compaction is persisted s.b.ForceCommit() + s.mu.Unlock() + s.revMu.Unlock() keep := s.kvindex.Compact(rev) ch := make(chan struct{}) var j = func(ctx context.Context) { @@ -290,7 +300,7 @@ func (s *store) Restore(b backend.Backend) error { atomic.StoreUint64(&s.consistentIndex, 0) s.b = b - s.kvindex = newTreeIndex() + s.kvindex = newTreeIndex(s.lg) s.currentRev = 1 s.compactMainRev = -1 s.fifoSched = schedule.NewFIFOScheduler() @@ -300,10 +310,13 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { - reportDbTotalSizeInBytesMu.Lock() b := s.b + reportDbTotalSizeInBytesMu.Lock() reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } reportDbTotalSizeInBytesMu.Unlock() + reportDbTotalSizeInUseInBytesMu.Lock() + reportDbTotalSizeInUseInBytes = func() float64 { return 
float64(b.SizeInUse()) } + reportDbTotalSizeInUseInBytesMu.Unlock() min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) @@ -318,7 +331,17 @@ func (s *store) restore() error { _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main - plog.Printf("restore compact to %d", s.compactMainRev) + + if s.lg != nil { + s.lg.Info( + "restored last compact revision", + zap.String("meta-bucket-name", string(metaBucketName)), + zap.String("meta-bucket-name-key", string(finishedCompactKeyName)), + zap.Int64("restored-compact-revision", s.compactMainRev), + ) + } else { + plog.Printf("restore compact to %d", s.compactMainRev) + } } _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) scheduledCompact := int64(0) @@ -328,7 +351,7 @@ func (s *store) restore() error { // index keys concurrently as they're loaded in from tx keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.kvindex) + rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) for { keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) if len(keys) == 0 { @@ -336,7 +359,7 @@ func (s *store) restore() error { } // rkvc blocks if the total pending keys exceeds the restore // chunk size to keep keys from consuming too much memory. 
- restoreChunk(rkvc, keys, vals, keyToLease) + restoreChunk(s.lg, rkvc, keys, vals, keyToLease) if len(keys) < restoreChunkKeys { // partial set implies final set break @@ -365,7 +388,15 @@ func (s *store) restore() error { } err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) if err != nil { - plog.Errorf("unexpected Attach error: %v", err) + if s.lg != nil { + s.lg.Warn( + "failed to attach a lease", + zap.String("lease-id", fmt.Sprintf("%016x", lid)), + zap.Error(err), + ) + } else { + plog.Errorf("unexpected Attach error: %v", err) + } } } @@ -373,7 +404,17 @@ func (s *store) restore() error { if scheduledCompact != 0 { s.Compact(scheduledCompact) - plog.Printf("resume scheduled compaction at %d", scheduledCompact) + + if s.lg != nil { + s.lg.Info( + "resume scheduled compaction", + zap.String("meta-bucket-name", string(metaBucketName)), + zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)), + zap.Int64("scheduled-compact-revision", scheduledCompact), + ) + } else { + plog.Printf("resume scheduled compaction at %d", scheduledCompact) + } } return nil @@ -385,7 +426,7 @@ type revKeyValue struct { kstr string } -func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { +func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) { rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) go func() { currentRev := int64(1) @@ -416,12 +457,12 @@ func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { currentRev = rev.main if ok { if isTombstone(rkv.key) { - ki.tombstone(rev.main, rev.sub) + ki.tombstone(lg, rev.main, rev.sub) continue } - ki.put(rev.main, rev.sub) + ki.put(lg, rev.main, rev.sub) } else if !isTombstone(rkv.key) { - ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) idx.Insert(ki) kiCache[rkv.kstr] = ki } @@ -430,11 +471,15 @@ func restoreIntoIndex(idx index) (chan<- 
revKeyValue, <-chan int64) { return rkvc, revc } -func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { +func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { for i, key := range keys { rkv := revKeyValue{key: key} if err := rkv.kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + if lg != nil { + lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) + } else { + plog.Fatalf("cannot unmarshal event: %v", err) + } } rkv.kstr = string(rkv.kv.Key) if isTombstone(key) { @@ -484,9 +529,17 @@ func (s *store) ConsistentIndex() uint64 { } // appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(b []byte) []byte { +func appendMarkTombstone(lg *zap.Logger, b []byte) []byte { if len(b) != revBytesLen { - plog.Panicf("cannot append mark to non normal revision bytes") + if lg != nil { + lg.Panic( + "cannot append tombstone mark to non-normal revision bytes", + zap.Int("expected-revision-bytes-size", revBytesLen), + zap.Int("given-revision-bytes-size", len(b)), + ) + } else { + plog.Panicf("cannot append mark to non normal revision bytes") + } } return append(b, markTombstone) } diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go similarity index 85% rename from vendor/github.com/coreos/etcd/internal/mvcc/kvstore_compaction.go rename to vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go index 1726490c..e7cfec1a 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore_compaction.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go @@ -17,6 +17,8 @@ package mvcc import ( "encoding/binary" "time" + + "go.uber.org/zap" ) func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { @@ -51,7 +53,15 @@ func (s *store) scheduleCompaction(compactMainRev 
int64, keep map[revision]struc revToBytes(revision{main: compactMainRev}, rbytes) tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) tx.Unlock() - plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + if s.lg != nil { + s.lg.Info( + "finished scheduled compaction", + zap.Int64("compact-revision", compactMainRev), + zap.Duration("took", time.Since(totalStart)), + ) + } else { + plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + } return true } diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go similarity index 72% rename from vendor/github.com/coreos/etcd/internal/mvcc/kvstore_txn.go rename to vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go index 6d839a1a..0e9b8613 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/kvstore_txn.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -15,9 +15,10 @@ package mvcc import ( - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" + "go.uber.org/zap" ) type storeTxnRead struct { @@ -83,14 +84,14 @@ func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, int64(tw.beginRev + 1) + return n, tw.beginRev + 1 } - return 0, int64(tw.beginRev) + return 0, tw.beginRev } func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { tw.put(key, value, lease) - return int64(tw.beginRev + 1) + return tw.beginRev + 1 } func (tw *storeTxnWrite) End() { @@ -120,7 +121,7 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions return 
&RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted } - revpairs := tr.s.kvindex.Revisions(key, end, int64(rev)) + revpairs := tr.s.kvindex.Revisions(key, end, rev) if len(revpairs) == 0 { return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil } @@ -139,10 +140,25 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions revToBytes(revpair, revBytes) _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0) if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + if tr.s.lg != nil { + tr.s.lg.Fatal( + "range failed to find revision pair", + zap.Int64("revision-main", revpair.main), + zap.Int64("revision-sub", revpair.sub), + ) + } else { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } } if err := kvs[i].Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + if tr.s.lg != nil { + tr.s.lg.Fatal( + "failed to unmarshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot unmarshal event: %v", err) + } } } return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil @@ -177,7 +193,14 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to marshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot marshal event: %v", err) + } } tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) @@ -190,7 +213,14 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { } err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to detach old lease from a key", + zap.Error(err), + ) + } else { + plog.Errorf("unexpected error from lease detach: %v", err) + } 
} } if leaseID != lease.NoLease { @@ -209,33 +239,54 @@ func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { if len(tw.changes) > 0 { rrev += 1 } - keys, revs := tw.s.kvindex.Range(key, end, rrev) + keys, _ := tw.s.kvindex.Range(key, end, rrev) if len(keys) == 0 { return 0 } - for i, key := range keys { - tw.delete(key, revs[i]) + for _, key := range keys { + tw.delete(key) } return int64(len(keys)) } -func (tw *storeTxnWrite) delete(key []byte, rev revision) { +func (tw *storeTxnWrite) delete(key []byte) { ibytes := newRevBytes() idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} revToBytes(idxRev, ibytes) - ibytes = appendMarkTombstone(ibytes) + + if tw.storeTxnRead.s != nil && tw.storeTxnRead.s.lg != nil { + ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes) + } else { + // TODO: remove this in v3.5 + ibytes = appendMarkTombstone(nil, ibytes) + } kv := mvccpb.KeyValue{Key: key} d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to marshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot marshal event: %v", err) + } } tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) err = tw.s.kvindex.Tombstone(key, idxRev) if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to tombstone an existing key", + zap.String("key", string(key)), + zap.Error(err), + ) + } else { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } } tw.changes = append(tw.changes, kv) @@ -245,7 +296,14 @@ func (tw *storeTxnWrite) delete(key []byte, rev revision) { if leaseID != lease.NoLease { err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) if err != nil { - plog.Errorf("cannot detach %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to detach old lease from 
a key", + zap.Error(err), + ) + } else { + plog.Errorf("cannot detach %v", err) + } } } } diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go similarity index 88% rename from vendor/github.com/coreos/etcd/internal/mvcc/metrics.go rename to vendor/github.com/coreos/etcd/mvcc/metrics.go index e44eb12d..4f43ed65 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -143,7 +143,7 @@ var ( Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database in bytes.", + Help: "Total size of the underlying database physically allocated in bytes.", }, func() float64 { reportDbTotalSizeInBytesMu.RLock() @@ -154,6 +154,22 @@ var ( // overridden by mvcc initialization reportDbTotalSizeInBytesMu sync.RWMutex reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } + + dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_total_size_in_use_in_bytes", + Help: "Total size of the underlying database logically in use in bytes.", + }, + func() float64 { + reportDbTotalSizeInUseInBytesMu.RLock() + defer reportDbTotalSizeInUseInBytesMu.RUnlock() + return reportDbTotalSizeInUseInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInUseInBytesMu sync.RWMutex + reportDbTotalSizeInUseInBytes func() float64 = func() float64 { return 0 } ) func init() { @@ -172,6 +188,7 @@ func init() { prometheus.MustRegister(dbCompactionTotalDurations) prometheus.MustRegister(dbCompactionKeysCounter) prometheus.MustRegister(dbTotalSize) + prometheus.MustRegister(dbTotalSizeInUse) } // ReportEventReceived reports that an event is received. 
diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go similarity index 96% rename from vendor/github.com/coreos/etcd/internal/mvcc/metrics_txn.go rename to vendor/github.com/coreos/etcd/mvcc/metrics_txn.go index 8f81f45e..6a96be76 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/metrics_txn.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -14,9 +14,7 @@ package mvcc -import ( - "github.com/coreos/etcd/internal/lease" -) +import "github.com/coreos/etcd/lease" type metricsTxnWrite struct { TxnWrite diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go similarity index 100% rename from vendor/github.com/coreos/etcd/internal/mvcc/mvccpb/kv.pb.go rename to vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go similarity index 100% rename from vendor/github.com/coreos/etcd/internal/mvcc/revision.go rename to vendor/github.com/coreos/etcd/mvcc/revision.go diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go similarity index 90% rename from vendor/github.com/coreos/etcd/internal/mvcc/util.go rename to vendor/github.com/coreos/etcd/mvcc/util.go index 1eb05005..aeb2ea8c 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/util.go +++ b/vendor/github.com/coreos/etcd/mvcc/util.go @@ -16,9 +16,10 @@ package mvcc import ( "encoding/binary" + "fmt" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" ) func UpdateConsistentIndex(be backend.Backend, index uint64) { @@ -47,7 +48,7 @@ func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + 
panic(fmt.Errorf("cannot marshal event: %v", err)) } be.BatchTx().Lock() diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go similarity index 91% rename from vendor/github.com/coreos/etcd/internal/mvcc/watchable_store.go rename to vendor/github.com/coreos/etcd/mvcc/watchable_store.go index 34d4d326..edbb7f9d 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -18,9 +18,10 @@ import ( "sync" "time" - "github.com/coreos/etcd/internal/lease" - "github.com/coreos/etcd/internal/mvcc/backend" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" + "go.uber.org/zap" ) // non-const so modifiable by tests @@ -67,13 +68,13 @@ type watchableStore struct { // cancel operations. type cancelFunc func() -func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { - return newWatchableStore(b, le, ig) +func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { + return newWatchableStore(lg, b, le, ig) } -func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { +func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { s := &watchableStore{ - store: NewStore(b, le, ig), + store: NewStore(lg, b, le, ig), victimc: make(chan struct{}, 1), unsynced: newWatcherGroup(), synced: newWatcherGroup(), @@ -192,7 +193,7 @@ func (s *watchableStore) Restore(b backend.Backend) error { } for wa := range s.synced.watchers { - s.unsynced.watchers.add(wa) + s.unsynced.add(wa) } s.synced = newWatcherGroup() return nil @@ -346,7 +347,13 @@ func (s *watchableStore) syncWatchers() int { tx := s.store.b.ReadTx() tx.Lock() revs, vs := 
tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) - evs := kvsToEvents(wg, revs, vs) + var evs []mvccpb.Event + if s.store != nil && s.store.lg != nil { + evs = kvsToEvents(s.store.lg, wg, revs, vs) + } else { + // TODO: remove this in v3.5 + evs = kvsToEvents(nil, wg, revs, vs) + } tx.Unlock() var victims watcherBatch @@ -398,11 +405,15 @@ func (s *watchableStore) syncWatchers() int { } // kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { +func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { for i, v := range vals { var kv mvccpb.KeyValue if err := kv.Unmarshal(v); err != nil { - plog.Panicf("cannot unmarshal event: %v", err) + if lg != nil { + lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) + } else { + plog.Panicf("cannot unmarshal event: %v", err) + } } if !wg.contains(string(kv.Key)) { @@ -426,7 +437,14 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { var victim watcherBatch for w, eb := range newWatcherBatch(&s.synced, evs) { if eb.revs != 1 { - plog.Panicf("unexpected multiple revisions in notification") + if s.store != nil && s.store.lg != nil { + s.store.lg.Panic( + "unexpected multiple revisions in watch notification", + zap.Int("number-of-revisions", eb.revs), + ) + } else { + plog.Panicf("unexpected multiple revisions in notification") + } } if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { pendingEventsGauge.Add(float64(len(eb.evs))) diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go similarity index 96% rename from vendor/github.com/coreos/etcd/internal/mvcc/watchable_store_txn.go rename to vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go index 9c66e533..bc8eb645 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/watchable_store_txn.go +++ 
b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -14,7 +14,7 @@ package mvcc -import "github.com/coreos/etcd/internal/mvcc/mvccpb" +import "github.com/coreos/etcd/mvcc/mvccpb" func (tw *watchableStoreTxnWrite) End() { changes := tw.Changes() diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go similarity index 99% rename from vendor/github.com/coreos/etcd/internal/mvcc/watcher.go rename to vendor/github.com/coreos/etcd/mvcc/watcher.go index 9b79aa80..886b87d5 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/watcher.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher.go @@ -19,7 +19,7 @@ import ( "errors" "sync" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/mvcc/mvccpb" ) // AutoWatchID is the watcher ID passed in WatchStream.Watch when no diff --git a/vendor/github.com/coreos/etcd/internal/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go similarity index 99% rename from vendor/github.com/coreos/etcd/internal/mvcc/watcher_group.go rename to vendor/github.com/coreos/etcd/mvcc/watcher_group.go index d56daff0..6ef1d0ce 100644 --- a/vendor/github.com/coreos/etcd/internal/mvcc/watcher_group.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -17,7 +17,7 @@ package mvcc import ( "math" - "github.com/coreos/etcd/internal/mvcc/mvccpb" + "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/adt" ) diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go new file mode 100644 index 00000000..81b0a9d0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go @@ -0,0 +1,46 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "log" + + "google.golang.org/grpc/grpclog" +) + +// assert that "discardLogger" satisfy "Logger" interface +var _ Logger = &discardLogger{} + +// NewDiscardLogger returns a new Logger that discards everything except "fatal". +func NewDiscardLogger() Logger { return &discardLogger{} } + +type discardLogger struct{} + +func (l *discardLogger) Info(args ...interface{}) {} +func (l *discardLogger) Infoln(args ...interface{}) {} +func (l *discardLogger) Infof(format string, args ...interface{}) {} +func (l *discardLogger) Warning(args ...interface{}) {} +func (l *discardLogger) Warningln(args ...interface{}) {} +func (l *discardLogger) Warningf(format string, args ...interface{}) {} +func (l *discardLogger) Error(args ...interface{}) {} +func (l *discardLogger) Errorln(args ...interface{}) {} +func (l *discardLogger) Errorf(format string, args ...interface{}) {} +func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) } +func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) } +func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) 
} +func (l *discardLogger) V(lvl int) bool { + return false +} +func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l } diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go new file mode 100644 index 00000000..e919f249 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logutil includes utilities to facilitate logging. +package logutil diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go new file mode 100644 index 00000000..e7da80ef --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go @@ -0,0 +1,64 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logutil + +import "google.golang.org/grpc/grpclog" + +// Logger defines logging interface. +// TODO: deprecate in v3.5. +type Logger interface { + grpclog.LoggerV2 + + // Lvl returns logger if logger's verbosity level >= "lvl". + // Otherwise, logger that discards everything. + Lvl(lvl int) grpclog.LoggerV2 +} + +// assert that "defaultLogger" satisfy "Logger" interface +var _ Logger = &defaultLogger{} + +// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface. +// +// For example: +// +// var defaultLogger Logger +// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4) +// defaultLogger = NewLogger(g) +// +func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} } + +type defaultLogger struct { + g grpclog.LoggerV2 +} + +func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) } +func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) } +func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) } +func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) } +func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) } +func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) } +func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) } +func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) 
} +func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) } +func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.g.V(lvl) { + return l + } + return &discardLogger{} +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go new file mode 100644 index 00000000..866b6f7a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go @@ -0,0 +1,194 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "fmt" + "sync" + "time" + + "github.com/coreos/pkg/capnslog" +) + +var ( + defaultMergePeriod = time.Second + defaultTimeOutputScale = 10 * time.Millisecond + + outputInterval = time.Second +) + +// line represents a log line that can be printed out +// through capnslog.PackageLogger. +type line struct { + level capnslog.LogLevel + str string +} + +func (l line) append(s string) line { + return line{ + level: l.level, + str: l.str + " " + s, + } +} + +// status represents the merge status of a line. 
+type status struct { + period time.Duration + + start time.Time // start time of latest merge period + count int // number of merged lines from starting +} + +func (s *status) isInMergePeriod(now time.Time) bool { + return s.period == 0 || s.start.Add(s.period).After(now) +} + +func (s *status) isEmpty() bool { return s.count == 0 } + +func (s *status) summary(now time.Time) string { + ts := s.start.Round(defaultTimeOutputScale) + took := now.Round(defaultTimeOutputScale).Sub(ts) + return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took) +} + +func (s *status) reset(now time.Time) { + s.start = now + s.count = 0 +} + +// MergeLogger supports merge logging, which merges repeated log lines +// and prints summary log lines instead. +// +// For merge logging, MergeLogger prints out the line when the line appears +// at the first time. MergeLogger holds the same log line printed within +// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod. +// It stops merging when the line doesn't appear within the +// defaultMergePeriod. 
+type MergeLogger struct { + *capnslog.PackageLogger + + mu sync.Mutex // protect statusm + statusm map[line]*status +} + +func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger { + l := &MergeLogger{ + PackageLogger: logger, + statusm: make(map[line]*status), + } + go l.outputLoop() + return l +} + +func (l *MergeLogger) MergeInfo(entries ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeInfof(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.INFO, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeNotice(entries ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.NOTICE, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeWarning(entries ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.WARNING, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) MergeError(entries ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprint(entries...), + }) +} + +func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) { + l.merge(line{ + level: capnslog.ERROR, + str: fmt.Sprintf(format, args...), + }) +} + +func (l *MergeLogger) merge(ln line) { + l.mu.Lock() + + // increase count if the logger is merging the line + if status, ok := l.statusm[ln]; ok { + status.count++ + l.mu.Unlock() + return + } + + // initialize status of the line + l.statusm[ln] = &status{ + period: defaultMergePeriod, + start: time.Now(), + } + // release the lock before IO operation + l.mu.Unlock() + // print out the line at its first time + l.PackageLogger.Logf(ln.level, ln.str) +} + +func (l 
*MergeLogger) outputLoop() { + for now := range time.Tick(outputInterval) { + var outputs []line + + l.mu.Lock() + for ln, status := range l.statusm { + if status.isInMergePeriod(now) { + continue + } + if status.isEmpty() { + delete(l.statusm, ln) + continue + } + outputs = append(outputs, ln.append(status.summary(now))) + status.reset(now) + } + l.mu.Unlock() + + for _, o := range outputs { + l.PackageLogger.Logf(o.level, o.str) + } + } +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go new file mode 100644 index 00000000..378bee0e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go @@ -0,0 +1,60 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc/grpclog" +) + +// assert that "packageLogger" satisfy "Logger" interface +var _ Logger = &packageLogger{} + +// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface. 
+// +// For example: +// +// var defaultLogger Logger +// defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot") +// +func NewPackageLogger(repo, pkg string) Logger { + return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)} +} + +type packageLogger struct { + p *capnslog.PackageLogger +} + +func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) } +func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) } +func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) } +func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) } +func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) } +func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) } +func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) } +func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) 
} +func (l *packageLogger) V(lvl int) bool { + return l.p.LevelAt(capnslog.LogLevel(lvl)) +} +func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 { + if l.p.LevelAt(capnslog.LogLevel(lvl)) { + return l + } + return &discardLogger{} +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go new file mode 100644 index 00000000..3f48d813 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go @@ -0,0 +1,111 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc/grpclog" +) + +// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2". +// It discards all INFO level logging in gRPC, if debug level +// is not enabled in "*zap.Logger". +func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) { + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core" +// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC, +// if debug level is not enabled in "*zap.Logger". 
+func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapGRPCLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapGRPCLogger) Info(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infoln(args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Info(args...) +} + +func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) { + if !zl.lg.Core().Enabled(zapcore.DebugLevel) { + return + } + zl.sugar.Infof(format, args...) +} + +func (zl *zapGRPCLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningln(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapGRPCLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorln(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapGRPCLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalln(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) 
+} + +func (zl *zapGRPCLogger) V(l int) bool { + // infoLog == 0 + if l <= 0 { // debug level, then we ignore info level in gRPC + return !zl.lg.Core().Enabled(zapcore.DebugLevel) + } + return true +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journald.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journald.go new file mode 100644 index 00000000..9999eb79 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journald.go @@ -0,0 +1,89 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package logutil + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" + "go.uber.org/zap/zapcore" +) + +// NewJournaldWriter wraps "io.Writer" to redirect log output +// to the local systemd journal. If journald send fails, it fails +// back to writing to the original writer. +// The decode overhead is only <30µs per write. +// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go +func NewJournaldWriter(wr io.Writer) io.Writer { + return &journaldWriter{Writer: wr} +} + +type journaldWriter struct { + io.Writer +} + +// WARN: assume that etcd uses default field names in zap encoder config +// make sure to keep this up-to-date! 
+type logLine struct { + Level string `json:"level"` + Caller string `json:"caller"` +} + +func (w *journaldWriter) Write(p []byte) (int, error) { + line := &logLine{} + if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil { + return 0, err + } + + var pri journal.Priority + switch line.Level { + case zapcore.DebugLevel.String(): + pri = journal.PriDebug + case zapcore.InfoLevel.String(): + pri = journal.PriInfo + + case zapcore.WarnLevel.String(): + pri = journal.PriWarning + case zapcore.ErrorLevel.String(): + pri = journal.PriErr + + case zapcore.DPanicLevel.String(): + pri = journal.PriCrit + case zapcore.PanicLevel.String(): + pri = journal.PriCrit + case zapcore.FatalLevel.String(): + pri = journal.PriCrit + + default: + panic(fmt.Errorf("unknown log level: %q", line.Level)) + } + + err := journal.Send(string(p), pri, map[string]string{ + "PACKAGE": filepath.Dir(line.Caller), + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + }) + if err != nil { + fmt.Println("FAILED TO WRITE TO JOURNALD", err, string(p)) + return w.Writer.Write(p) + } + return 0, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go new file mode 100644 index 00000000..5ee703dd --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go @@ -0,0 +1,97 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logutil + +import ( + "errors" + + "github.com/coreos/etcd/raft" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// NewRaftLogger converts "*zap.Logger" to "raft.Logger". +func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { + if lcfg == nil { + return nil, errors.New("nil zap.Config") + } + lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil" + if err != nil { + return nil, err + } + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil +} + +// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" +// and "zapcore.WriteSyncer". +func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger { + // "AddCallerSkip" to annotate caller outside of "logutil" + lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer)) + return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} +} + +type zapRaftLogger struct { + lg *zap.Logger + sugar *zap.SugaredLogger +} + +func (zl *zapRaftLogger) Debug(args ...interface{}) { + zl.sugar.Debug(args...) +} + +func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) { + zl.sugar.Debugf(format, args...) +} + +func (zl *zapRaftLogger) Error(args ...interface{}) { + zl.sugar.Error(args...) +} + +func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) { + zl.sugar.Errorf(format, args...) +} + +func (zl *zapRaftLogger) Info(args ...interface{}) { + zl.sugar.Info(args...) +} + +func (zl *zapRaftLogger) Infof(format string, args ...interface{}) { + zl.sugar.Infof(format, args...) +} + +func (zl *zapRaftLogger) Warning(args ...interface{}) { + zl.sugar.Warn(args...) +} + +func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) { + zl.sugar.Warnf(format, args...) +} + +func (zl *zapRaftLogger) Fatal(args ...interface{}) { + zl.sugar.Fatal(args...) +} + +func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) { + zl.sugar.Fatalf(format, args...) 
+} + +func (zl *zapRaftLogger) Panic(args ...interface{}) { + zl.sugar.Panic(args...) +} + +func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) { + zl.sugar.Panicf(format, args...) +} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/doc.go b/vendor/github.com/coreos/etcd/pkg/netutil/doc.go new file mode 100644 index 00000000..5d92d03a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/netutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package netutil implements network-related utility functions. +package netutil diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go index e3db8c50..52373bd0 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package netutil implements network-related utility functions. 
package netutil import ( @@ -25,15 +24,12 @@ import ( "time" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" + + "go.uber.org/zap" ) -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") - - // indirection for testing - resolveTCPAddr = resolveTCPAddrDefault -) +// indirection for testing +var resolveTCPAddr = resolveTCPAddrDefault const retryInterval = time.Second @@ -67,7 +63,7 @@ func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, erro // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. -func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) { +func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][]url.URL, error) { newurls := make([][]url.URL, 0) for _, us := range urls { nus := make([]url.URL, len(us)) @@ -79,7 +75,7 @@ func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) nus[i] = *nu } for i, u := range nus { - h, err := resolveURL(ctx, u) + h, err := resolveURL(ctx, lg, u) if err != nil { return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err) } @@ -92,14 +88,19 @@ func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) return newurls, nil } -func resolveURL(ctx context.Context, u url.URL) (string, error) { +func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error) { if u.Scheme == "unix" || u.Scheme == "unixs" { // unix sockets don't resolve over TCP return "", nil } host, _, err := net.SplitHostPort(u.Host) if err != nil { - plog.Errorf("could not parse url %s during tcp resolving", u.Host) + lg.Warn( + "failed to parse URL Host while resolving URL", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Error(err), + ) return "", err } if host == "localhost" || net.ParseIP(host) != nil { @@ -108,13 +109,32 @@ func 
resolveURL(ctx context.Context, u url.URL) (string, error) { for ctx.Err() == nil { tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { - plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) + lg.Info( + "resolved URL Host", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.String("resolved-addr", tcpAddr.String()), + ) return tcpAddr.String(), nil } - plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval) + + lg.Warn( + "failed to resolve URL Host", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Duration("retry-interval", retryInterval), + zap.Error(err), + ) + select { case <-ctx.Done(): - plog.Errorf("could not resolve host %s", u.Host) + lg.Warn( + "failed to resolve URL Host; returning", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Duration("retry-interval", retryInterval), + zap.Error(err), + ) return "", err case <-time.After(retryInterval): } @@ -124,11 +144,11 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { // urlsEqual checks equality of url.URLS between two arrays. // This check pass even if an URL is in hostname and opposite is in IP address. -func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) { +func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (bool, error) { if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b)) } - urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b}) + urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b}) if err != nil { return false, err } @@ -150,7 +170,7 @@ func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) { // URLStringsEqual returns "true" if given URLs are valid // and resolved to same IP addresses. Otherwise, return "false" // and error, if any. 
-func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) { +func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) { if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", a, b) } @@ -170,7 +190,13 @@ func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) } urlsB = append(urlsB, *u) } - return urlsEqual(ctx, urlsA, urlsB) + if lg == nil { + lg, _ = zap.NewProduction() + if lg == nil { + lg = zap.NewExample() + } + } + return urlsEqual(ctx, lg, urlsA, urlsB) } func urlsToStrings(us []url.URL) []string { diff --git a/vendor/github.com/coreos/etcd/pkg/report/timeseries.go b/vendor/github.com/coreos/etcd/pkg/report/timeseries.go index bc1493b2..a999c2dc 100644 --- a/vendor/github.com/coreos/etcd/pkg/report/timeseries.go +++ b/vendor/github.com/coreos/etcd/pkg/report/timeseries.go @@ -102,7 +102,7 @@ func (sp *secondPoints) getTimeSeries() TimeSeries { for k, v := range sp.tm { var lat time.Duration if v.count > 0 { - lat = time.Duration(v.totalLatency) / time.Duration(v.count) + lat = v.totalLatency / time.Duration(v.count) } tslice[i] = DataPoint{ Timestamp: k, diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go index 1b042d9c..ae00388d 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -14,9 +14,7 @@ package types -import ( - "strconv" -) +import "strconv" // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go new file mode 100644 index 00000000..b55c591f --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/doc.go @@ -0,0 +1,300 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package raft sends and receives messages in the Protocol Buffer format +defined in the raftpb package. + +Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. +The state machine is kept in sync through the use of a replicated log. +For more details on Raft, see "In Search of an Understandable Consensus Algorithm" +(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. + +A simple example application, _raftexample_, is also available to help illustrate +how to use this package in practice: +https://github.com/coreos/etcd/tree/master/contrib/raftexample + +Usage + +The primary object in raft is a Node. You either start a Node from scratch +using raft.StartNode or start a Node from some initial state using raft.RestartNode. + +To start a node from scratch: + + storage := raft.NewMemoryStorage() + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) + +To restart a node from previous state: + + storage := raft.NewMemoryStorage() + + // recover the in-memory storage from persistent + // snapshot, state and entries. + storage.ApplySnapshot(snapshot) + storage.SetHardState(state) + storage.Append(entries) + + c := &Config{ + ID: 0x01, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 4096, + MaxInflightMsgs: 256, + } + + // restart raft without peer information. 
+ // peer information is already included in the storage. + n := raft.RestartNode(c) + +Now that you are holding onto a Node you have a few responsibilities: + +First, you must read from the Node.Ready() channel and process the updates +it contains. These steps may be performed in parallel, except as noted in step +2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are +not empty. Note that when writing an Entry with Index i, any +previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that +no messages be sent until the latest HardState has been persisted to disk, +and all Entries written by any previous Ready batch (Messages may be sent while +entries from the same batch are being persisted). To reduce the I/O latency, an +optimization can be applied to make leader write to disk in parallel with its +followers (as explained at section 10.2.1 in Raft thesis). If any Message has type +MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be +large). + +Note: Marshalling messages is not thread-safe; it is important that you +make sure that no new entries are persisted while marshalling. +The easiest way to achieve this is to serialise the messages directly inside +your main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. +If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() +to apply it to the node. The configuration change may be cancelled at this point +by setting the NodeID field to zero before calling ApplyConfChange +(but ApplyConfChange must be called one way or the other, and the decision to cancel +must be based solely on the state machine and not external information such as +the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. 
+This may be done at any time after step 1, although all updates must be processed +in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an +implementation of the Storage interface. The provided MemoryStorage +type can be used for this (if you repopulate its state upon a +restart), or you can supply your own disk-backed implementation. + +Third, when you receive a message from another node, pass it to Node.Step: + + func recvRaftRPC(ctx context.Context, m raftpb.Message) { + n.Step(ctx, m) + } + +Finally, you need to call Node.Tick() at regular intervals (probably +via a time.Ticker). Raft has two important timeouts: heartbeat and the +election timeout. However, internally to the raft package time is +represented by an abstract "tick". + +The total state machine handling loop will look something like this: + + for { + select { + case <-s.Ticker: + n.Tick() + case rd := <-s.Node.Ready(): + saveToStorage(rd.State, rd.Entries, rd.Snapshot) + send(rd.Messages) + if !raft.IsEmptySnap(rd.Snapshot) { + processSnapshot(rd.Snapshot) + } + for _, entry := range rd.CommittedEntries { + process(entry) + if entry.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + cc.Unmarshal(entry.Data) + s.Node.ApplyConfChange(cc) + } + } + s.Node.Advance() + case <-s.done: + return + } + } + +To propose changes to the state machine from your node take your application +data, serialize it into a byte slice and call: + + n.Propose(ctx, data) + +If the proposal is committed, data will appear in committed entries with type +raftpb.EntryNormal. There is no guarantee that a proposed command will be +committed; you may have to re-propose after a timeout. + +To add or remove node in a cluster, build ConfChange struct 'cc' and call: + + n.ProposeConfChange(ctx, cc) + +After config change is committed, some committed entry with type +raftpb.EntryConfChange will be returned. 
You must apply it to node through: + + var cc raftpb.ConfChange + cc.Unmarshal(data) + n.ApplyConfChange(cc) + +Note: An ID represents a unique node in a cluster for all time. A +given ID MUST be used only once even if the old node has been removed. +This means that for example IP addresses make poor node IDs since they +may be reused. Node IDs must be non-zero. + +Implementation notes + +This implementation is up to date with the final Raft thesis +(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +implementation of the membership change protocol differs somewhat from +that described in chapter 4. The key invariant that membership changes +happen one node at a time is preserved, but in our implementation the +membership change takes effect when its entry is applied, not when it +is added to the log (so the entry is committed under the old +membership instead of the new). This is equivalent in terms of safety, +since the old and new configurations are guaranteed to overlap. + +To ensure that we do not attempt to commit two membership changes at +once by matching log positions (which would be unsafe since they +should have different quorum requirements), we simply disallow any +proposed membership change while any uncommitted change appears in +the leader's log. + +This approach introduces a problem when you try to remove a member +from a two-member cluster: If one of the members dies before the +other one receives the commit of the confchange entry, then the member +cannot be removed any more since the cluster cannot make progress. +For this reason it is highly recommended to use three or more nodes in +every cluster. + +MessageType + +Package raft sends and receives message in Protocol Buffer format (defined +in raftpb package). Each state (follower, candidate, leader) implements its +own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when +advancing with the given raftpb.Message. Each step is determined by its +raftpb.MessageType. 
Note that every step is checked by one common method +'Step' that safety-checks the terms of node and incoming message to prevent +stale log entries: + + 'MsgHup' is used for election. If a node is a follower or candidate, the + 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or + candidate has not received any heartbeat before the election timeout, it + passes 'MsgHup' to its Step method and becomes (or remains) a candidate to + start a new election. + + 'MsgBeat' is an internal type that signals the leader to send a heartbeat of + the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in + the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to + send periodic 'MsgHeartbeat' messages to its followers. + + 'MsgProp' proposes to append data to its log entries. This is a special + type to redirect proposals to leader. Therefore, send method overwrites + raftpb.Message's term with its HardState's term to avoid attaching its + local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' + method, the leader first calls the 'appendEntry' method to append entries + to its log, and then calls 'bcastAppend' method to send those entries to + its peers. When passed to candidate, 'MsgProp' is dropped. When passed to + follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send + method. It is stored with sender's ID and later forwarded to leader by + rafthttp package. + + 'MsgApp' contains log entries to replicate. A leader calls bcastAppend, + which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' + type. When 'MsgApp' is passed to candidate's Step method, candidate reverts + back to follower, because it indicates that there is a valid leader sending + 'MsgApp' messages. Candidate and follower respond to this message in + 'MsgAppResp' type. + + 'MsgAppResp' is response to log replication request('MsgApp'). 
When + 'MsgApp' is passed to candidate or follower's Step method, it responds by + calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft + mailbox. + + 'MsgVote' requests votes for election. When a node is a follower or + candidate and 'MsgHup' is passed to its Step method, then the node calls + 'campaign' method to campaign itself to become a leader. Once 'campaign' + method is called, the node becomes candidate and sends 'MsgVote' to peers + in cluster to request votes. When passed to leader or candidate's Step + method and the message's Term is lower than leader's or candidate's, + 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true). + If leader or candidate receives 'MsgVote' with higher term, it will revert + back to follower. When 'MsgVote' is passed to follower, it votes for the + sender only when sender's last term is greater than MsgVote's term or + sender's last term is equal to MsgVote's term but sender's last committed + index is greater than or equal to follower's. + + 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is + passed to candidate, the candidate calculates how many votes it has won. If + it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. + If candidate receives majority of votes of denials, it reverts back to + follower. + + 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election + protocol. When Config.PreVote is true, a pre-election is carried out first + (using the same rules as a regular election), and no node increases its term + number unless the pre-election indicates that the campaigining node would win. + This minimizes disruption when a partitioned node rejoins the cluster. + + 'MsgSnap' requests to install a snapshot message. When a node has just + become a leader or the leader receives 'MsgProp' message, it calls + 'bcastAppend' method, which then calls 'sendAppend' method to each + follower. 
In 'sendAppend', if a leader fails to get term or entries, + the leader requests snapshot by sending 'MsgSnap' type message. + + 'MsgSnapStatus' tells the result of snapshot install message. When a + follower rejected 'MsgSnap', it indicates the snapshot request with + 'MsgSnap' had failed from network issues which causes the network layer + to fail to send out snapshots to its followers. Then leader considers + follower's progress as probe. When 'MsgSnap' were not rejected, it + indicates that the snapshot succeeded and the leader sets follower's + progress to probe and resumes its log replication. + + 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed + to candidate and message's term is higher than candidate's, the candidate + reverts back to follower and updates its committed index from the one in + this heartbeat. And it sends the message to its mailbox. When + 'MsgHeartbeat' is passed to follower's Step method and message's term is + higher than follower's, the follower updates its leaderID with the ID + from the message. + + 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp' + is passed to leader's Step method, the leader knows which follower + responded. And only when the leader's last committed index is greater than + follower's Match index, the leader runs 'sendAppend` method. + + 'MsgUnreachable' tells that request(message) wasn't delivered. When + 'MsgUnreachable' is passed to leader's Step method, the leader discovers + that the follower that sent this 'MsgUnreachable' is not reachable, often + indicating 'MsgApp' is lost. When follower's progress state is replicate, + the leader sets it back to probe. 
+ +*/ +package raft diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go new file mode 100644 index 00000000..c3036d3c --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log.go @@ -0,0 +1,358 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "fmt" + "log" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type raftLog struct { + // storage contains all stable entries since the last snapshot. + storage Storage + + // unstable contains all unstable entries and snapshot. + // they will be saved into storage. + unstable unstable + + // committed is the highest log position that is known to be in + // stable storage on a quorum of nodes. + committed uint64 + // applied is the highest log position that the application has + // been instructed to apply to its state machine. + // Invariant: applied <= committed + applied uint64 + + logger Logger +} + +// newLog returns log using the given storage. It recovers the log to the state +// that it just commits and applies the latest snapshot. 
+func newLog(storage Storage, logger Logger) *raftLog { + if storage == nil { + log.Panic("storage must not be nil") + } + log := &raftLog{ + storage: storage, + logger: logger, + } + firstIndex, err := storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + lastIndex, err := storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + log.unstable.offset = lastIndex + 1 + log.unstable.logger = logger + // Initialize our committed and applied pointers to the time of the last compaction. + log.committed = firstIndex - 1 + log.applied = firstIndex - 1 + + return log +} + +func (l *raftLog) String() string { + return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) +} + +// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, +// it returns (last index of new entries, true). +func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { + if l.matchTerm(index, logTerm) { + lastnewi = index + uint64(len(ents)) + ci := l.findConflict(ents) + switch { + case ci == 0: + case ci <= l.committed: + l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) + default: + offset := index + 1 + l.append(ents[ci-offset:]...) + } + l.commitTo(min(committed, lastnewi)) + return lastnewi, true + } + return 0, false +} + +func (l *raftLog) append(ents ...pb.Entry) uint64 { + if len(ents) == 0 { + return l.lastIndex() + } + if after := ents[0].Index - 1; after < l.committed { + l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) + } + l.unstable.truncateAndAppend(ents) + return l.lastIndex() +} + +// findConflict finds the index of the conflict. +// It returns the first pair of conflicting entries between the existing +// entries and the given entries, if there are any. 
+// If there is no conflicting entries, and the existing entries contains +// all the given entries, zero will be returned. +// If there is no conflicting entries, but the given entries contains new +// entries, the index of the first new entry will be returned. +// An entry is considered to be conflicting if it has the same index but +// a different term. +// The first entry MUST have an index equal to the argument 'from'. +// The index of the given entries MUST be continuously increasing. +func (l *raftLog) findConflict(ents []pb.Entry) uint64 { + for _, ne := range ents { + if !l.matchTerm(ne.Index, ne.Term) { + if ne.Index <= l.lastIndex() { + l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", + ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) + } + return ne.Index + } + } + return 0 +} + +func (l *raftLog) unstableEntries() []pb.Entry { + if len(l.unstable.entries) == 0 { + return nil + } + return l.unstable.entries +} + +// nextEnts returns all the available entries for execution. +// If applied is smaller than the index of snapshot, it returns all committed +// entries after the index of snapshot. +func (l *raftLog) nextEnts() (ents []pb.Entry) { + off := max(l.applied+1, l.firstIndex()) + if l.committed+1 > off { + ents, err := l.slice(off, l.committed+1, noLimit) + if err != nil { + l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) + } + return ents + } + return nil +} + +// hasNextEnts returns if there is any available entries for execution. This +// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). 
+func (l *raftLog) hasNextEnts() bool { + off := max(l.applied+1, l.firstIndex()) + return l.committed+1 > off +} + +func (l *raftLog) snapshot() (pb.Snapshot, error) { + if l.unstable.snapshot != nil { + return *l.unstable.snapshot, nil + } + return l.storage.Snapshot() +} + +func (l *raftLog) firstIndex() uint64 { + if i, ok := l.unstable.maybeFirstIndex(); ok { + return i + } + index, err := l.storage.FirstIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return index +} + +func (l *raftLog) lastIndex() uint64 { + if i, ok := l.unstable.maybeLastIndex(); ok { + return i + } + i, err := l.storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + return i +} + +func (l *raftLog) commitTo(tocommit uint64) { + // never decrease commit + if l.committed < tocommit { + if l.lastIndex() < tocommit { + l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) + } + l.committed = tocommit + } +} + +func (l *raftLog) appliedTo(i uint64) { + if i == 0 { + return + } + if l.committed < i || i < l.applied { + l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) + } + l.applied = i +} + +func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } + +func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } + +func (l *raftLog) lastTerm() uint64 { + t, err := l.term(l.lastIndex()) + if err != nil { + l.logger.Panicf("unexpected error when getting the last term (%v)", err) + } + return t +} + +func (l *raftLog) term(i uint64) (uint64, error) { + // the valid term range is [index of dummy entry, last index] + dummyIndex := l.firstIndex() - 1 + if i < dummyIndex || i > l.lastIndex() { + // TODO: return an error instead? 
+ return 0, nil + } + + if t, ok := l.unstable.maybeTerm(i); ok { + return t, nil + } + + t, err := l.storage.Term(i) + if err == nil { + return t, nil + } + if err == ErrCompacted || err == ErrUnavailable { + return 0, err + } + panic(err) // TODO(bdarnell) +} + +func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { + if i > l.lastIndex() { + return nil, nil + } + return l.slice(i, l.lastIndex()+1, maxsize) +} + +// allEntries returns all entries in the log. +func (l *raftLog) allEntries() []pb.Entry { + ents, err := l.entries(l.firstIndex(), noLimit) + if err == nil { + return ents + } + if err == ErrCompacted { // try again if there was a racing compaction + return l.allEntries() + } + // TODO (xiangli): handle error? + panic(err) +} + +// isUpToDate determines if the given (lastIndex,term) log is more up-to-date +// by comparing the index and term of the last entries in the existing logs. +// If the logs have last entries with different terms, then the log with the +// later term is more up-to-date. If the logs end with the same term, then +// whichever log has the larger lastIndex is more up-to-date. If the logs are +// the same, the given log is up-to-date. +func (l *raftLog) isUpToDate(lasti, term uint64) bool { + return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) +} + +func (l *raftLog) matchTerm(i, term uint64) bool { + t, err := l.term(i) + if err != nil { + return false + } + return t == term +} + +func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { + if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { + l.commitTo(maxIndex) + return true + } + return false +} + +func (l *raftLog) restore(s pb.Snapshot) { + l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) + l.committed = s.Metadata.Index + l.unstable.restore(s) +} + +// slice returns a slice of log entries from lo through hi-1, inclusive. 
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { + err := l.mustCheckOutOfBounds(lo, hi) + if err != nil { + return nil, err + } + if lo == hi { + return nil, nil + } + var ents []pb.Entry + if lo < l.unstable.offset { + storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) + if err == ErrCompacted { + return nil, err + } else if err == ErrUnavailable { + l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) + } else if err != nil { + panic(err) // TODO(bdarnell) + } + + // check if ents has reached the size limitation + if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { + return storedEnts, nil + } + + ents = storedEnts + } + if hi > l.unstable.offset { + unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) + if len(ents) > 0 { + ents = append([]pb.Entry{}, ents...) + ents = append(ents, unstable...) + } else { + ents = unstable + } + } + return limitSize(ents, maxSize), nil +} + +// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) +func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { + if lo > hi { + l.logger.Panicf("invalid slice %d > %d", lo, hi) + } + fi := l.firstIndex() + if lo < fi { + return ErrCompacted + } + + length := l.lastIndex() + 1 - fi + if lo < fi || hi > fi+length { + l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) + } + return nil +} + +func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { + if err == nil { + return t + } + if err == ErrCompacted { + return 0 + } + l.logger.Panicf("unexpected error (%v)", err) + return 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go new file mode 100644 index 00000000..a8a8f5ca --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -0,0 +1,159 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// unstable.entries[i] has raft log position i+unstable.offset. +// Note that unstable.offset may be less than the highest log +// position in storage; this means that the next write to storage +// might need to truncate the log before persisting unstable.entries. +type unstable struct { + // the incoming unstable snapshot, if any. + snapshot *pb.Snapshot + // all entries that have not yet been written to storage. + entries []pb.Entry + offset uint64 + + logger Logger +} + +// maybeFirstIndex returns the index of the first possible entry in entries +// if it has a snapshot. +func (u *unstable) maybeFirstIndex() (uint64, bool) { + if u.snapshot != nil { + return u.snapshot.Metadata.Index + 1, true + } + return 0, false +} + +// maybeLastIndex returns the last index if it has at least one +// unstable entry or snapshot. +func (u *unstable) maybeLastIndex() (uint64, bool) { + if l := len(u.entries); l != 0 { + return u.offset + uint64(l) - 1, true + } + if u.snapshot != nil { + return u.snapshot.Metadata.Index, true + } + return 0, false +} + +// maybeTerm returns the term of the entry at index i, if there +// is any. 
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) { + if i < u.offset { + if u.snapshot == nil { + return 0, false + } + if u.snapshot.Metadata.Index == i { + return u.snapshot.Metadata.Term, true + } + return 0, false + } + + last, ok := u.maybeLastIndex() + if !ok { + return 0, false + } + if i > last { + return 0, false + } + return u.entries[i-u.offset].Term, true +} + +func (u *unstable) stableTo(i, t uint64) { + gt, ok := u.maybeTerm(i) + if !ok { + return + } + // if i < offset, term is matched with the snapshot + // only update the unstable entries if term is matched with + // an unstable entry. + if gt == t && i >= u.offset { + u.entries = u.entries[i+1-u.offset:] + u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. 
+ const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries + } +} + +func (u *unstable) stableSnapTo(i uint64) { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { + u.snapshot = nil + } +} + +func (u *unstable) restore(s pb.Snapshot) { + u.offset = s.Metadata.Index + 1 + u.entries = nil + u.snapshot = &s +} + +func (u *unstable) truncateAndAppend(ents []pb.Entry) { + after := ents[0].Index + switch { + case after == u.offset+uint64(len(u.entries)): + // after is the next index in the u.entries + // directly append + u.entries = append(u.entries, ents...) + case after <= u.offset: + u.logger.Infof("replace the unstable entries from index %d", after) + // The log is being truncated to before our current offset + // portion, so set the offset and replace the entries + u.offset = after + u.entries = ents + default: + // truncate to after and copy to u.entries + // then append + u.logger.Infof("truncate the unstable entries before index %d", after) + u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) + u.entries = append(u.entries, ents...) 
+ } +} + +func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { + u.mustCheckOutOfBounds(lo, hi) + return u.entries[lo-u.offset : hi-u.offset] +} + +// u.offset <= lo <= hi <= u.offset+len(u.entries) +func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { + if lo > hi { + u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) + } + upper := u.offset + uint64(len(u.entries)) + if lo < u.offset || hi > upper { + u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) + } +} diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go new file mode 100644 index 00000000..92e55b37 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/logger.go @@ -0,0 +1,126 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "fmt" + "io/ioutil" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func SetLogger(l Logger) { raftLogger = l } + +var ( + defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} + discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + raftLogger = Logger(defaultLogger) +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. +type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + 
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) +} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go new file mode 100644 index 00000000..b24ba609 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -0,0 +1,582 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "context" + "errors" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type SnapshotStatus int + +const ( + SnapshotFinish SnapshotStatus = 1 + SnapshotFailure SnapshotStatus = 2 +) + +var ( + emptyState = pb.HardState{} + + // ErrStopped is returned by methods on Nodes that have been stopped. + ErrStopped = errors.New("raft: stopped") +) + +// SoftState provides state that is useful for logging and debugging. 
+// The state is volatile and does not need to be persisted to the WAL. +type SoftState struct { + Lead uint64 // must use atomic operations to access; keep 64-bit aligned. + RaftState StateType +} + +func (a *SoftState) equal(b *SoftState) bool { + return a.Lead == b.Lead && a.RaftState == b.RaftState +} + +// Ready encapsulates the entries and messages that are ready to read, +// be saved to stable storage, committed or sent to other peers. +// All fields in Ready are read-only. +type Ready struct { + // The current volatile state of a Node. + // SoftState will be nil if there is no update. + // It is not required to consume or store SoftState. + *SoftState + + // The current state of a Node to be saved to stable storage BEFORE + // Messages are sent. + // HardState will be equal to empty state if there is no update. + pb.HardState + + // ReadStates can be used for node to serve linearizable read requests locally + // when its applied index is greater than the index in ReadState. + // Note that the readState will be returned when raft receives msgReadIndex. + // The returned is only valid for the request that requested to read. + ReadStates []ReadState + + // Entries specifies entries to be saved to stable storage BEFORE + // Messages are sent. + Entries []pb.Entry + + // Snapshot specifies the snapshot to be saved to stable storage. + Snapshot pb.Snapshot + + // CommittedEntries specifies entries to be committed to a + // store/state-machine. These have previously been committed to stable + // store. + CommittedEntries []pb.Entry + + // Messages specifies outbound messages to be sent AFTER Entries are + // committed to stable storage. + // If it contains a MsgSnap message, the application MUST report back to raft + // when the snapshot has been received or has failed by calling ReportSnapshot. + Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. 
+ MustSync bool +} + +func isHardStateEqual(a, b pb.HardState) bool { + return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit +} + +// IsEmptyHardState returns true if the given HardState is empty. +func IsEmptyHardState(st pb.HardState) bool { + return isHardStateEqual(st, emptyState) +} + +// IsEmptySnap returns true if the given Snapshot is empty. +func IsEmptySnap(sp pb.Snapshot) bool { + return sp.Metadata.Index == 0 +} + +func (rd Ready) containsUpdates() bool { + return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || + !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || + len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 +} + +// Node represents a node in a raft cluster. +type Node interface { + // Tick increments the internal logical clock for the Node by a single tick. Election + // timeouts and heartbeat timeouts are in units of ticks. + Tick() + // Campaign causes the Node to transition to candidate state and start campaigning to become leader. + Campaign(ctx context.Context) error + // Propose proposes that data be appended to the log. + Propose(ctx context.Context, data []byte) error + // ProposeConfChange proposes config change. + // At most one ConfChange can be in the process of going through consensus. + // Application needs to call ApplyConfChange when applying EntryConfChange type entry. + ProposeConfChange(ctx context.Context, cc pb.ConfChange) error + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. + Step(ctx context.Context, msg pb.Message) error + + // Ready returns a channel that returns the current point-in-time state. + // Users of the Node must call Advance after retrieving the state returned by Ready. + // + // NOTE: No committed entries from the next Ready may be applied until all committed entries + // and snapshots from the previous one have finished. 
+ Ready() <-chan Ready + + // Advance notifies the Node that the application has saved progress up to the last Ready. + // It prepares the node to return the next available Ready. + // + // The application should generally call Advance after it applies the entries in last Ready. + // + // However, as an optimization, the application may call Advance while it is applying the + // commands. For example. when the last Ready contains a snapshot, the application might take + // a long time to apply the snapshot data. To continue receiving Ready without blocking raft + // progress, it can call Advance before finishing applying the last ready. + Advance() + // ApplyConfChange applies config change to the local node. + // Returns an opaque ConfState protobuf which must be recorded + // in snapshots. Will never return nil; it returns a pointer only + // to match MemoryStorage.Compact. + ApplyConfChange(cc pb.ConfChange) *pb.ConfState + + // TransferLeadership attempts to transfer leadership to the given transferee. + TransferLeadership(ctx context.Context, lead, transferee uint64) + + // ReadIndex request a read state. The read state will be set in the ready. + // Read state has a read index. Once the application advances further than the read + // index, any linearizable read requests issued before the read request can be + // processed safely. The read state will have the same rctx attached. + ReadIndex(ctx context.Context, rctx []byte) error + + // Status returns the current status of the raft state machine. + Status() Status + // ReportUnreachable reports the given node is not reachable for the last send. + ReportUnreachable(id uint64) + // ReportSnapshot reports the status of the sent snapshot. + ReportSnapshot(id uint64, status SnapshotStatus) + // Stop performs any necessary termination of the Node. + Stop() +} + +type Peer struct { + ID uint64 + Context []byte +} + +// StartNode returns a new Node given configuration and a list of raft peers. 
+// It appends a ConfChangeAddNode entry for each given peer to the initial log. +func StartNode(c *Config, peers []Peer) Node { + r := newRaft(c) + // become the follower at term 1 and apply initial configuration + // entries of term 1 + r.becomeFollower(1, None) + for _, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + d, err := cc.Marshal() + if err != nil { + panic("unexpected marshal error") + } + e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} + r.raftLog.append(e) + } + // Mark these initial entries as committed. + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + r.raftLog.committed = r.raftLog.lastIndex() + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). + // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + for _, peer := range peers { + r.addNode(peer.ID) + } + + n := newNode() + n.logger = c.Logger + go n.run(r) + return &n +} + +// RestartNode is similar to StartNode but does not take a list of peers. +// The current membership of the cluster will be restored from the Storage. +// If the caller has an existing state machine, pass in the last log index that +// has been applied to it; otherwise use zero. 
+func RestartNode(c *Config) Node { + r := newRaft(c) + + n := newNode() + n.logger = c.Logger + go n.run(r) + return &n +} + +type msgWithResult struct { + m pb.Message + result chan error +} + +// node is the canonical implementation of the Node interface +type node struct { + propc chan msgWithResult + recvc chan pb.Message + confc chan pb.ConfChange + confstatec chan pb.ConfState + readyc chan Ready + advancec chan struct{} + tickc chan struct{} + done chan struct{} + stop chan struct{} + status chan chan Status + + logger Logger +} + +func newNode() node { + return node{ + propc: make(chan msgWithResult), + recvc: make(chan pb.Message), + confc: make(chan pb.ConfChange), + confstatec: make(chan pb.ConfState), + readyc: make(chan Ready), + advancec: make(chan struct{}), + // make tickc a buffered chan, so raft node can buffer some ticks when the node + // is busy processing raft messages. Raft node will resume process buffered + // ticks when it becomes idle. + tickc: make(chan struct{}, 128), + done: make(chan struct{}), + stop: make(chan struct{}), + status: make(chan chan Status), + } +} + +func (n *node) Stop() { + select { + case n.stop <- struct{}{}: + // Not already stopped, so trigger it + case <-n.done: + // Node has already been stopped - no need to do anything + return + } + // Block until the stop has been acknowledged by run() + <-n.done +} + +func (n *node) run(r *raft) { + var propc chan msgWithResult + var readyc chan Ready + var advancec chan struct{} + var prevLastUnstablei, prevLastUnstablet uint64 + var havePrevLastUnstablei bool + var prevSnapi uint64 + var rd Ready + + lead := None + prevSoftSt := r.softState() + prevHardSt := emptyState + + for { + if advancec != nil { + readyc = nil + } else { + rd = newReady(r, prevSoftSt, prevHardSt) + if rd.containsUpdates() { + readyc = n.readyc + } else { + readyc = nil + } + } + + if lead != r.lead { + if r.hasLeader() { + if lead == None { + r.logger.Infof("raft.node: %x elected leader %x at term 
%d", r.id, r.lead, r.Term) + } else { + r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) + } + propc = n.propc + } else { + r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) + propc = nil + } + lead = r.lead + } + + select { + // TODO: maybe buffer the config propose if there exists one (the way + // described in raft dissertation) + // Currently it is dropped in Step silently. + case pm := <-propc: + m := pm.m + m.From = r.id + err := r.Step(m) + if pm.result != nil { + pm.result <- err + close(pm.result) + } + case m := <-n.recvc: + // filter out response message from unknown From. + if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { + r.Step(m) + } + case cc := <-n.confc: + if cc.NodeID == None { + select { + case n.confstatec <- pb.ConfState{ + Nodes: r.nodes(), + Learners: r.learnerNodes()}: + case <-n.done: + } + break + } + switch cc.Type { + case pb.ConfChangeAddNode: + r.addNode(cc.NodeID) + case pb.ConfChangeAddLearnerNode: + r.addLearner(cc.NodeID) + case pb.ConfChangeRemoveNode: + // block incoming proposal when local node is + // removed + if cc.NodeID == r.id { + propc = nil + } + r.removeNode(cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + panic("unexpected conf type") + } + select { + case n.confstatec <- pb.ConfState{ + Nodes: r.nodes(), + Learners: r.learnerNodes()}: + case <-n.done: + } + case <-n.tickc: + r.tick() + case readyc <- rd: + if rd.SoftState != nil { + prevSoftSt = rd.SoftState + } + if len(rd.Entries) > 0 { + prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index + prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term + havePrevLastUnstablei = true + } + if !IsEmptyHardState(rd.HardState) { + prevHardSt = rd.HardState + } + if !IsEmptySnap(rd.Snapshot) { + prevSnapi = rd.Snapshot.Metadata.Index + } + + r.msgs = nil + r.readStates = nil + advancec = n.advancec + case <-advancec: + if prevHardSt.Commit != 0 { + 
r.raftLog.appliedTo(prevHardSt.Commit) + } + if havePrevLastUnstablei { + r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) + havePrevLastUnstablei = false + } + r.raftLog.stableSnapTo(prevSnapi) + advancec = nil + case c := <-n.status: + c <- getStatus(r) + case <-n.stop: + close(n.done) + return + } + } +} + +// Tick increments the internal logical clock for this Node. Election timeouts +// and heartbeat timeouts are in units of ticks. +func (n *node) Tick() { + select { + case n.tickc <- struct{}{}: + case <-n.done: + default: + n.logger.Warningf("A tick missed to fire. Node blocks too long!") + } +} + +func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } + +func (n *node) Propose(ctx context.Context, data []byte) error { + return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) +} + +func (n *node) Step(ctx context.Context, m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + // TODO: return an error? + return nil + } + return n.step(ctx, m) +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) +} + +func (n *node) step(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, false) +} + +func (n *node) stepWait(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, true) +} + +// Step advances the state machine using msgs. The ctx.Err() will be returned, +// if any. 
+func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error { + if m.Type != pb.MsgProp { + select { + case n.recvc <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + } + ch := n.propc + pm := msgWithResult{m: m} + if wait { + pm.result = make(chan error, 1) + } + select { + case ch <- pm: + if !wait { + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + select { + case rsp := <-pm.result: + if rsp != nil { + return rsp + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + return nil +} + +func (n *node) Ready() <-chan Ready { return n.readyc } + +func (n *node) Advance() { + select { + case n.advancec <- struct{}{}: + case <-n.done: + } +} + +func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + var cs pb.ConfState + select { + case n.confc <- cc: + case <-n.done: + } + select { + case cs = <-n.confstatec: + case <-n.done: + } + return &cs +} + +func (n *node) Status() Status { + c := make(chan Status) + select { + case n.status <- c: + return <-c + case <-n.done: + return Status{} + } +} + +func (n *node) ReportUnreachable(id uint64) { + select { + case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: + case <-n.done: + } +} + +func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + select { + case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: + case <-n.done: + } +} + +func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { + select { + // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership + case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: + case <-n.done: + case <-ctx.Done(): + } +} + +func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { + return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: 
[]pb.Entry{{Data: rctx}}}) +} + +func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { + rd := Ready{ + Entries: r.raftLog.unstableEntries(), + CommittedEntries: r.raftLog.nextEnts(), + Messages: r.msgs, + } + if softSt := r.softState(); !softSt.equal(prevSoftSt) { + rd.SoftState = softSt + } + if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { + rd.HardState = hardSt + } + if r.raftLog.unstable.snapshot != nil { + rd.Snapshot = *r.raftLog.unstable.snapshot + } + if len(r.readStates) != 0 { + rd.ReadStates = r.readStates + } + rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) + return rd +} + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go new file mode 100644 index 00000000..ef3787db --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/progress.go @@ -0,0 +1,284 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import "fmt" + +const ( + ProgressStateProbe ProgressStateType = iota + ProgressStateReplicate + ProgressStateSnapshot +) + +type ProgressStateType uint64 + +var prstmap = [...]string{ + "ProgressStateProbe", + "ProgressStateReplicate", + "ProgressStateSnapshot", +} + +func (st ProgressStateType) String() string { return prstmap[uint64(st)] } + +// Progress represents a follower’s progress in the view of the leader. Leader maintains +// progresses of all followers, and sends entries to the follower based on its progress. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in ProgressStateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in ProgressStateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in ProgressStateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State ProgressStateType + + // Paused is used in ProgressStateProbe. + // When Paused is true, raft should pause sending replication message to this peer. + Paused bool + // PendingSnapshot is used in ProgressStateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. 
+ RecentActive bool + + // inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. + // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.freeTo with the index of the last + // received entry. + ins *inflights + + // IsLearner is true if this progress is tracked for a learner. + IsLearner bool +} + +func (pr *Progress) resetState(state ProgressStateType) { + pr.Paused = false + pr.PendingSnapshot = 0 + pr.State = state + pr.ins.reset() +} + +func (pr *Progress) becomeProbe() { + // If the original state is ProgressStateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == ProgressStateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.resetState(ProgressStateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.resetState(ProgressStateProbe) + pr.Next = pr.Match + 1 + } +} + +func (pr *Progress) becomeReplicate() { + pr.resetState(ProgressStateReplicate) + pr.Next = pr.Match + 1 +} + +func (pr *Progress) becomeSnapshot(snapshoti uint64) { + pr.resetState(ProgressStateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// maybeUpdate returns false if the given n index comes from an outdated message. +// Otherwise it updates the progress and returns true. 
+func (pr *Progress) maybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.resume() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } + +// maybeDecrTo returns false if the given to index comes from an out of order message. +// Otherwise it decreases the progress next index to min(rejected, last) and returns true. +func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { + if pr.State == ProgressStateReplicate { + // the rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // directly decrease next to match + 1 + pr.Next = pr.Match + 1 + return true + } + + // the rejection must be stale if "rejected" does not match next - 1 + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.resume() + return true +} + +func (pr *Progress) pause() { pr.Paused = true } +func (pr *Progress) resume() { pr.Paused = false } + +// IsPaused returns whether sending log entries to this node has been +// paused. A node may be paused because it has rejected recent +// MsgApps, is currently waiting for a snapshot, or has reached the +// MaxInflightMsgs limit. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case ProgressStateProbe: + return pr.Paused + case ProgressStateReplicate: + return pr.ins.full() + case ProgressStateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } + +// needSnapshotAbort returns true if snapshot progress's Match +// is equal or higher than the pendingSnapshot. 
+func (pr *Progress) needSnapshotAbort() bool { + return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot +} + +func (pr *Progress) String() string { + return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) +} + +type inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +func newInflights(size int) *inflights { + return &inflights{ + size: size, + } +} + +// add adds an inflight into inflights +func (in *inflights) add(inflight uint64) { + if in.full() { + panic("cannot add into a full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.growBuf() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *inflights) growBuf() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// freeTo frees the inflights smaller or equal to the given `to` flight. 
+func (in *inflights) freeTo(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + idx := in.start + var i int + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } + +// full returns true if the inflights is full. +func (in *inflights) full() bool { + return in.count == in.size +} + +// resets frees all inflights. +func (in *inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go new file mode 100644 index 00000000..a07b39cc --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -0,0 +1,1450 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "bytes" + "errors" + "fmt" + "math" + "math/rand" + "sort" + "strings" + "sync" + "time" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// None is a placeholder node ID used when there is no leader. 
+const None uint64 = 0 +const noLimit = math.MaxUint64 + +// Possible values for StateType. +const ( + StateFollower StateType = iota + StateCandidate + StateLeader + StatePreCandidate + numStates +) + +type ReadOnlyOption int + +const ( + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + ReadOnlySafe ReadOnlyOption = iota + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + ReadOnlyLeaseBased +) + +// Possible values for CampaignType +const ( + // campaignPreElection represents the first phase of a normal election when + // Config.PreVote is true. + campaignPreElection CampaignType = "CampaignPreElection" + // campaignElection represents a normal (time-based) election (the second phase + // of the election when Config.PreVote is true). + campaignElection CampaignType = "CampaignElection" + // campaignTransfer represents the type of leader transfer + campaignTransfer CampaignType = "CampaignTransfer" +) + +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + +// lockedRand is a small wrapper around rand.Rand to provide +// synchronization. Only the methods needed by the code are exposed +// (e.g. Intn). 
+type lockedRand struct { + mu sync.Mutex + rand *rand.Rand +} + +func (r *lockedRand) Intn(n int) int { + r.mu.Lock() + v := r.rand.Intn(n) + r.mu.Unlock() + return v +} + +var globalRand = &lockedRand{ + rand: rand.New(rand.NewSource(time.Now().UnixNano())), +} + +// CampaignType represents the type of campaigning +// the reason we use the type of string instead of uint64 +// is because it's simpler to compare and fill in raft entries +type CampaignType string + +// StateType represents the role of a node in a cluster. +type StateType uint64 + +var stmap = [...]string{ + "StateFollower", + "StateCandidate", + "StateLeader", + "StatePreCandidate", +} + +func (st StateType) String() string { + return stmap[uint64(st)] +} + +// Config contains the parameters to start a raft. +type Config struct { + // ID is the identity of the local raft. ID cannot be 0. + ID uint64 + + // peers contains the IDs of all nodes (including self) in the raft cluster. It + // should only be set when starting a new raft cluster. Restarting raft from + // previous configuration will panic if peers is set. peer is private and only + // used for testing right now. + peers []uint64 + + // learners contains the IDs of all learner nodes (including self if the + // local node is a learner) in the raft cluster. learners only receives + // entries from the leader node. It does not vote or promote itself. + learners []uint64 + + // ElectionTick is the number of Node.Tick invocations that must pass between + // elections. That is, if a follower does not receive any message from the + // leader of current term before ElectionTick has elapsed, it will become + // candidate and start an election. ElectionTick must be greater than + // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid + // unnecessary leader switching. + ElectionTick int + // HeartbeatTick is the number of Node.Tick invocations that must pass between + // heartbeats. 
That is, a leader sends heartbeat messages to maintain its + // leadership every HeartbeatTick ticks. + HeartbeatTick int + + // Storage is the storage for raft. raft generates entries and states to be + // stored in storage. raft reads the persisted entries and states out of + // Storage when it needs. raft reads out the previous state and configuration + // out of storage when restarting. + Storage Storage + // Applied is the last applied index. It should only be set when restarting + // raft. raft will not return entries to the application smaller or equal to + // Applied. If Applied is unset when restarting, raft might return previous + // applied entries. This is a very application dependent configuration. + Applied uint64 + + // MaxSizePerMsg limits the max size of each append message. Smaller value + // lowers the raft recovery cost(initial probing and message lost during normal + // operation). On the other side, it might affect the throughput during normal + // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per + // message. + MaxSizePerMsg uint64 + // MaxInflightMsgs limits the max number of in-flight append messages during + // optimistic replication phase. The application transportation layer usually + // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid + // overflowing that sending buffer. TODO (xiangli): feedback to application to + // limit the proposal rate? + MaxInflightMsgs int + + // CheckQuorum specifies if the leader should check quorum activity. Leader + // steps down when quorum is not active for an electionTimeout. + CheckQuorum bool + + // PreVote enables the Pre-Vote algorithm described in raft thesis section + // 9.6. This prevents disruption when a node that has been partitioned away + // rejoins the cluster. + PreVote bool + + // ReadOnlyOption specifies how the read only request is processed. 
+ // + // ReadOnlySafe guarantees the linearizability of the read only request by + // communicating with the quorum. It is the default and suggested option. + // + // ReadOnlyLeaseBased ensures linearizability of the read only request by + // relying on the leader lease. It can be affected by clock drift. + // If the clock drift is unbounded, leader might keep the lease longer than it + // should (clock can move backward/pause without any bound). ReadIndex is not safe + // in that case. + // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased. + ReadOnlyOption ReadOnlyOption + + // Logger is the logger used for raft log. For multinode which can host + // multiple raft group, each raft group can have its own logger + Logger Logger + + // DisableProposalForwarding set to true means that followers will drop + // proposals, rather than forwarding them to the leader. One use case for + // this feature would be in a situation where the Raft leader is used to + // compute the data of a proposal, for example, adding a timestamp from a + // hybrid logical clock to data in a monotonically increasing way. Forwarding + // should be disabled to prevent a follower with an inaccurate hybrid + // logical clock from assigning the timestamp and then forwarding the data + // to the leader. 
+ DisableProposalForwarding bool +} + +func (c *Config) validate() error { + if c.ID == None { + return errors.New("cannot use none as id") + } + + if c.HeartbeatTick <= 0 { + return errors.New("heartbeat tick must be greater than 0") + } + + if c.ElectionTick <= c.HeartbeatTick { + return errors.New("election tick must be greater than heartbeat tick") + } + + if c.Storage == nil { + return errors.New("storage cannot be nil") + } + + if c.MaxInflightMsgs <= 0 { + return errors.New("max inflight messages must be greater than 0") + } + + if c.Logger == nil { + c.Logger = raftLogger + } + + if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum { + return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased") + } + + return nil +} + +type raft struct { + id uint64 + + Term uint64 + Vote uint64 + + readStates []ReadState + + // the log + raftLog *raftLog + + maxInflight int + maxMsgSize uint64 + prs map[uint64]*Progress + learnerPrs map[uint64]*Progress + + state StateType + + // isLearner is true if the local raft node is a learner. + isLearner bool + + votes map[uint64]bool + + msgs []pb.Message + + // the leader id + lead uint64 + // leadTransferee is id of the leader transfer target when its value is not zero. + // Follow the procedure defined in raft thesis 3.10. + leadTransferee uint64 + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via pendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + pendingConfIndex uint64 + + readOnly *readOnly + + // number of ticks since it reached last electionTimeout when it is leader + // or candidate. + // number of ticks since it reached last electionTimeout or received a + // valid message from current leader when it is a follower. 
+ electionElapsed int + + // number of ticks since it reached last heartbeatTimeout. + // only leader keeps heartbeatElapsed. + heartbeatElapsed int + + checkQuorum bool + preVote bool + + heartbeatTimeout int + electionTimeout int + // randomizedElectionTimeout is a random number between + // [electiontimeout, 2 * electiontimeout - 1]. It gets reset + // when raft changes its state to follower or candidate. + randomizedElectionTimeout int + disableProposalForwarding bool + + tick func() + step stepFunc + + logger Logger +} + +func newRaft(c *Config) *raft { + if err := c.validate(); err != nil { + panic(err.Error()) + } + raftlog := newLog(c.Storage, c.Logger) + hs, cs, err := c.Storage.InitialState() + if err != nil { + panic(err) // TODO(bdarnell) + } + peers := c.peers + learners := c.learners + if len(cs.Nodes) > 0 || len(cs.Learners) > 0 { + if len(peers) > 0 || len(learners) > 0 { + // TODO(bdarnell): the peers argument is always nil except in + // tests; the argument should be removed and these tests should be + // updated to specify their nodes through a snapshot. 
+ panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)") + } + peers = cs.Nodes + learners = cs.Learners + } + r := &raft{ + id: c.ID, + lead: None, + isLearner: false, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxInflight: c.MaxInflightMsgs, + prs: make(map[uint64]*Progress), + learnerPrs: make(map[uint64]*Progress), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + disableProposalForwarding: c.DisableProposalForwarding, + } + for _, p := range peers { + r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} + } + for _, p := range learners { + if _, ok := r.prs[p]; ok { + panic(fmt.Sprintf("node %x is in both learner and peer list", p)) + } + r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true} + if r.id == p { + r.isLearner = true + } + } + + if !isHardStateEqual(hs, emptyState) { + r.loadState(hs) + } + if c.Applied > 0 { + raftlog.appliedTo(c.Applied) + } + r.becomeFollower(r.Term, None) + + var nodesStrs []string + for _, n := range r.nodes() { + nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) + } + + r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", + r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) + return r +} + +func (r *raft) hasLeader() bool { return r.lead != None } + +func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } + +func (r *raft) hardState() pb.HardState { + return pb.HardState{ + Term: r.Term, + Vote: r.Vote, + Commit: r.raftLog.committed, + } +} + +func (r *raft) quorum() int { return len(r.prs)/2 + 1 } + +func (r *raft) nodes() []uint64 { + nodes := make([]uint64, 0, len(r.prs)) + for id := range r.prs { + nodes = append(nodes, id) + } + 
sort.Sort(uint64Slice(nodes)) + return nodes +} + +func (r *raft) learnerNodes() []uint64 { + nodes := make([]uint64, 0, len(r.learnerPrs)) + for id := range r.learnerPrs { + nodes = append(nodes, id) + } + sort.Sort(uint64Slice(nodes)) + return nodes +} + +// send persists state to stable storage and then sends to its mailbox. +func (r *raft) send(m pb.Message) { + m.From = r.id + if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp { + if m.Term == 0 { + // All {pre-,}campaign messages need to have the term set when + // sending. + // - MsgVote: m.Term is the term the node is campaigning for, + // non-zero as we increment the term when campaigning. + // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was + // granted, non-zero for the same reason MsgVote is + // - MsgPreVote: m.Term is the term the node will campaign, + // non-zero as we use m.Term to indicate the next term we'll be + // campaigning for + // - MsgPreVoteResp: m.Term is the term received in the original + // MsgPreVote if the pre-vote was granted, non-zero for the + // same reasons MsgPreVote is + panic(fmt.Sprintf("term should be set when sending %s", m.Type)) + } + } else { + if m.Term != 0 { + panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) + } + // do not attach term to MsgProp, MsgReadIndex + // proposals are a way to forward to the leader and + // should be treated as local message. + // MsgReadIndex is also forwarded to leader. + if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { + m.Term = r.Term + } + } + r.msgs = append(r.msgs, m) +} + +func (r *raft) getProgress(id uint64) *Progress { + if pr, ok := r.prs[id]; ok { + return pr + } + + return r.learnerPrs[id] +} + +// sendAppend sends RPC, with entries to the given peer. 
+func (r *raft) sendAppend(to uint64) { + pr := r.getProgress(to) + if pr.IsPaused() { + return + } + m := pb.Message{} + m.To = to + + term, errt := r.raftLog.term(pr.Next - 1) + ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + + if errt != nil || erre != nil { // send snapshot if we failed to get term or entries + if !pr.RecentActive { + r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) + return + } + + m.Type = pb.MsgSnap + snapshot, err := r.raftLog.snapshot() + if err != nil { + if err == ErrSnapshotTemporarilyUnavailable { + r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) + return + } + panic(err) // TODO(bdarnell) + } + if IsEmptySnap(snapshot) { + panic("need non-empty snapshot") + } + m.Snapshot = snapshot + sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term + r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", + r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) + pr.becomeSnapshot(sindex) + r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) + } else { + m.Type = pb.MsgApp + m.Index = pr.Next - 1 + m.LogTerm = term + m.Entries = ents + m.Commit = r.raftLog.committed + if n := len(m.Entries); n != 0 { + switch pr.State { + // optimistically increase the next when in ProgressStateReplicate + case ProgressStateReplicate: + last := m.Entries[n-1].Index + pr.optimisticUpdate(last) + pr.ins.add(last) + case ProgressStateProbe: + pr.pause() + default: + r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) + } + } + } + r.send(m) +} + +// sendHeartbeat sends an empty MsgApp +func (r *raft) sendHeartbeat(to uint64, ctx []byte) { + // Attach the commit as min(to.matched, r.committed). 
+ // When the leader sends out heartbeat message, + // the receiver(follower) might not be matched with the leader + // or it might not have all the committed entries. + // The leader MUST NOT forward the follower's commit to + // an unmatched index. + commit := min(r.getProgress(to).Match, r.raftLog.committed) + m := pb.Message{ + To: to, + Type: pb.MsgHeartbeat, + Commit: commit, + Context: ctx, + } + + r.send(m) +} + +func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) { + for id, pr := range r.prs { + f(id, pr) + } + + for id, pr := range r.learnerPrs { + f(id, pr) + } +} + +// bcastAppend sends RPC, with entries to all peers that are not up-to-date +// according to the progress recorded in r.prs. +func (r *raft) bcastAppend() { + r.forEachProgress(func(id uint64, _ *Progress) { + if id == r.id { + return + } + + r.sendAppend(id) + }) +} + +// bcastHeartbeat sends RPC, without entries to all the peers. +func (r *raft) bcastHeartbeat() { + lastCtx := r.readOnly.lastPendingRequestCtx() + if len(lastCtx) == 0 { + r.bcastHeartbeatWithCtx(nil) + } else { + r.bcastHeartbeatWithCtx([]byte(lastCtx)) + } +} + +func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { + r.forEachProgress(func(id uint64, _ *Progress) { + if id == r.id { + return + } + r.sendHeartbeat(id, ctx) + }) +} + +// maybeCommit attempts to advance the commit index. Returns true if +// the commit index changed (in which case the caller should call +// r.bcastAppend). +func (r *raft) maybeCommit() bool { + // TODO(bmizerany): optimize.. 
Currently naive + mis := make(uint64Slice, 0, len(r.prs)) + for _, p := range r.prs { + mis = append(mis, p.Match) + } + sort.Sort(sort.Reverse(mis)) + mci := mis[r.quorum()-1] + return r.raftLog.maybeCommit(mci, r.Term) +} + +func (r *raft) reset(term uint64) { + if r.Term != term { + r.Term = term + r.Vote = None + } + r.lead = None + + r.electionElapsed = 0 + r.heartbeatElapsed = 0 + r.resetRandomizedElectionTimeout() + + r.abortLeaderTransfer() + + r.votes = make(map[uint64]bool) + r.forEachProgress(func(id uint64, pr *Progress) { + *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner} + if id == r.id { + pr.Match = r.raftLog.lastIndex() + } + }) + + r.pendingConfIndex = 0 + r.readOnly = newReadOnly(r.readOnly.option) +} + +func (r *raft) appendEntry(es ...pb.Entry) { + li := r.raftLog.lastIndex() + for i := range es { + es[i].Term = r.Term + es[i].Index = li + 1 + uint64(i) + } + // use latest "last" index after truncate/append + li = r.raftLog.append(es...) + r.getProgress(r.id).maybeUpdate(li) + // Regardless of maybeCommit's return, our caller will call bcastAppend. + r.maybeCommit() +} + +// tickElection is run by followers and candidates after r.electionTimeout. +func (r *raft) tickElection() { + r.electionElapsed++ + + if r.promotable() && r.pastElectionTimeout() { + r.electionElapsed = 0 + r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + } +} + +// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. +func (r *raft) tickHeartbeat() { + r.heartbeatElapsed++ + r.electionElapsed++ + + if r.electionElapsed >= r.electionTimeout { + r.electionElapsed = 0 + if r.checkQuorum { + r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + } + // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. 
		// An in-flight leadership transfer that did not finish within one
		// electionTimeout is aborted so normal operation can resume.
		if r.state == StateLeader && r.leadTransferee != None {
			r.abortLeaderTransfer()
		}
	}

	if r.state != StateLeader {
		return
	}

	if r.heartbeatElapsed >= r.heartbeatTimeout {
		r.heartbeatElapsed = 0
		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
	}
}

// becomeFollower transitions the node to StateFollower at the given term,
// recording lead as the known leader (None if unknown).
func (r *raft) becomeFollower(term uint64, lead uint64) {
	r.step = stepFollower
	r.reset(term)
	r.tick = r.tickElection
	r.lead = lead
	r.state = StateFollower
	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
}

// becomeCandidate transitions the node to StateCandidate, incrementing the
// term and voting for itself.
func (r *raft) becomeCandidate() {
	// TODO(xiangli) remove the panic when the raft implementation is stable
	if r.state == StateLeader {
		panic("invalid transition [leader -> candidate]")
	}
	r.step = stepCandidate
	r.reset(r.Term + 1)
	r.tick = r.tickElection
	r.Vote = r.id
	r.state = StateCandidate
	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
}

// becomePreCandidate transitions the node to StatePreCandidate.
func (r *raft) becomePreCandidate() {
	// TODO(xiangli) remove the panic when the raft implementation is stable
	if r.state == StateLeader {
		panic("invalid transition [leader -> pre-candidate]")
	}
	// Becoming a pre-candidate changes our step functions and state,
	// but doesn't change anything else. In particular it does not increase
	// r.Term or change r.Vote.
	r.step = stepCandidate
	r.votes = make(map[uint64]bool)
	r.tick = r.tickElection
	r.state = StatePreCandidate
	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
}

// becomeLeader transitions the node to StateLeader and immediately appends
// an empty entry at the new term.
func (r *raft) becomeLeader() {
	// TODO(xiangli) remove the panic when the raft implementation is stable
	if r.state == StateFollower {
		panic("invalid transition [follower -> leader]")
	}
	r.step = stepLeader
	r.reset(r.Term)
	r.tick = r.tickHeartbeat
	r.lead = r.id
	r.state = StateLeader
	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
	if err != nil {
		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
	}

	// Conservatively set the pendingConfIndex to the last index in the
	// log. There may or may not be a pending config change, but it's
	// safe to delay any future proposals until we commit all our
	// pending log entries, and scanning the entire tail of the log
	// could be expensive.
	if len(ents) > 0 {
		r.pendingConfIndex = ents[len(ents)-1].Index
	}

	r.appendEntry(pb.Entry{Data: nil})
	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
}

// campaign starts an election (or pre-election) of the given type by voting
// for itself and requesting votes from all known peers.
func (r *raft) campaign(t CampaignType) {
	var term uint64
	var voteMsg pb.MessageType
	if t == campaignPreElection {
		r.becomePreCandidate()
		voteMsg = pb.MsgPreVote
		// PreVote RPCs are sent for the next term before we've incremented r.Term.
		term = r.Term + 1
	} else {
		r.becomeCandidate()
		voteMsg = pb.MsgVote
		term = r.Term
	}
	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
		// We won the election after voting for ourselves (which must mean that
		// this is a single-node cluster). Advance to the next state.
		if t == campaignPreElection {
			r.campaign(campaignElection)
		} else {
			r.becomeLeader()
		}
		return
	}
	for id := range r.prs {
		if id == r.id {
			continue
		}
		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)

		var ctx []byte
		if t == campaignTransfer {
			ctx = []byte(t)
		}
		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
	}
}

// poll records a vote (or rejection) from id and returns the number of
// granted votes seen so far.
func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
	if v {
		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
	} else {
		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
	}
	// Only the first response from each peer counts; later duplicates are ignored.
	if _, ok := r.votes[id]; !ok {
		r.votes[id] = v
	}
	for _, vv := range r.votes {
		if vv {
			granted++
		}
	}
	return granted
}

// Step is the entrance of message handling; it dispatches m based on the
// message term and then on the message type / current state.
func (r *raft) Step(m pb.Message) error {
	// Handle the message term, which may result in our stepping down to a follower.
	switch {
	case m.Term == 0:
		// local message
	case m.Term > r.Term:
		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
			force := bytes.Equal(m.Context, []byte(campaignTransfer))
			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
			if !force && inLease {
				// If a server receives a RequestVote request within the minimum election timeout
				// of hearing from a current leader, it does not update its term or grant its vote
				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
				return nil
			}
		}
		switch {
		case m.Type == pb.MsgPreVote:
			// Never change our term in response to a PreVote
		case m.Type == pb.MsgPreVoteResp && !m.Reject:
			// We send pre-vote requests with a term in our future. If the
			// pre-vote is granted, we will increment our term when we get a
			// quorum. If it is not, the term comes from the node that
			// rejected our vote so we should become a follower at the new
			// term.
		default:
			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
				r.id, r.Term, m.Type, m.From, m.Term)
			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
				r.becomeFollower(m.Term, m.From)
			} else {
				r.becomeFollower(m.Term, None)
			}
		}

	case m.Term < r.Term:
		if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
			// We have received messages from a leader at a lower term. It is possible
			// that these messages were simply delayed in the network, but this could
			// also mean that this node has advanced its term number during a network
			// partition, and it is now unable to either win an election or to rejoin
			// the majority on the old term. If checkQuorum is false, this will be
			// handled by incrementing term numbers in response to MsgVote with a
			// higher term, but if checkQuorum is true we may not advance the term on
			// MsgVote and must generate other messages to advance the term. The net
			// result of these two features is to minimize the disruption caused by
			// nodes that have been removed from the cluster's configuration: a
			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
			// disruptive term increases, by notifying leader of this node's activeness.
			// The above comments are also true for Pre-Vote.
			//
			// When a follower gets isolated, it soon starts an election, ending
			// up with a higher term than the leader, although it won't receive enough
			// votes to win the election. When it regains connectivity, this response
			// with "pb.MsgAppResp" of higher term would force the leader to step down.
			// However, this disruption is inevitable to free this stuck node with a
			// fresh election. This can be prevented with the Pre-Vote phase.
			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
		} else if m.Type == pb.MsgPreVote {
			// Before Pre-Vote was enabled, there could be candidates with a higher
			// term but a shorter log. After updating to Pre-Vote, the cluster may
			// deadlock if we drop messages with a lower term.
			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
			r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
		} else {
			// ignore other cases
			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
				r.id, r.Term, m.Type, m.From, m.Term)
		}
		return nil
	}

	switch m.Type {
	case pb.MsgHup:
		if r.state != StateLeader {
			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
			if err != nil {
				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
			}
			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
				return nil
			}

			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
			if r.preVote {
				r.campaign(campaignPreElection)
			} else {
				r.campaign(campaignElection)
			}
		} else {
			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
		}

	case pb.MsgVote, pb.MsgPreVote:
		if r.isLearner {
			// TODO: learner may need to vote, in case of node down when confchange.
			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
			return nil
		}
		// We can vote if this is a repeat of a vote we've already cast...
		canVote := r.Vote == m.From ||
			// ...we haven't voted and we don't think there's a leader yet in this term...
			(r.Vote == None && r.lead == None) ||
			// ...or this is a PreVote for a future term...
			(m.Type == pb.MsgPreVote && m.Term > r.Term)
		// ...and we believe the candidate is up to date.
		if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
			// When responding to Msg{Pre,}Vote messages we include the term
			// from the message, not the local term. To see why consider the
			// case where a single node was previously partitioned away and
			// its local term is now out of date. If we include the local term
			// (recall that for pre-votes we don't update the local term), the
			// (pre-)campaigning node on the other end will proceed to ignore
			// the message (it ignores all out of date messages).
			// The term in the original message and current local term are the
			// same in the case of regular votes, but different for pre-votes.
			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
			if m.Type == pb.MsgVote {
				// Only record real votes.
				r.electionElapsed = 0
				r.Vote = m.From
			}
		} else {
			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
		}

	default:
		// State-specific handling: stepLeader / stepCandidate / stepFollower.
		err := r.step(r, m)
		if err != nil {
			return err
		}
	}
	return nil
}

// stepFunc is the per-state message handler installed by the become* methods.
type stepFunc func(r *raft, m pb.Message) error

func stepLeader(r *raft, m pb.Message) error {
	// These message types do not require any progress for m.From.
	switch m.Type {
	case pb.MsgBeat:
		r.bcastHeartbeat()
		return nil
	case pb.MsgCheckQuorum:
		if !r.checkQuorumActive() {
			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
			r.becomeFollower(r.Term, None)
		}
		return nil
	case pb.MsgProp:
		if len(m.Entries) == 0 {
			r.logger.Panicf("%x stepped empty MsgProp", r.id)
		}
		if _, ok := r.prs[r.id]; !ok {
			// If we are not currently a member of the range (i.e. this node
			// was removed from the configuration while serving as leader),
			// drop any new proposals.
			return ErrProposalDropped
		}
		if r.leadTransferee != None {
			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
			return ErrProposalDropped
		}

		for i, e := range m.Entries {
			if e.Type == pb.EntryConfChange {
				if r.pendingConfIndex > r.raftLog.applied {
					r.logger.Infof("propose conf %s ignored since pending unapplied configuration [index %d, applied %d]",
						e.String(), r.pendingConfIndex, r.raftLog.applied)
					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
				} else {
					r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
				}
			}
		}
		r.appendEntry(m.Entries...)
		r.bcastAppend()
		return nil
	case pb.MsgReadIndex:
		if r.quorum() > 1 {
			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
				// Reject read only request when this leader has not committed any log entry at its term.
				return nil
			}

			// thinking: use an internally defined context instead of the user given context.
			// We can express this in terms of the term and index instead of a user-supplied value.
			// This would allow multiple reads to piggyback on the same message.
			switch r.readOnly.option {
			case ReadOnlySafe:
				r.readOnly.addRequest(r.raftLog.committed, m)
				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
			case ReadOnlyLeaseBased:
				ri := r.raftLog.committed
				if m.From == None || m.From == r.id { // from local member
					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
				} else {
					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
				}
			}
		} else {
			// Single-voter cluster: answer with the committed index directly.
			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
		}

		return nil
	}

	// All other message types require a progress for m.From (pr).
	pr := r.getProgress(m.From)
	if pr == nil {
		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
		return nil
	}
	switch m.Type {
	case pb.MsgAppResp:
		pr.RecentActive = true

		if m.Reject {
			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
				r.id, m.RejectHint, m.From, m.Index)
			if pr.maybeDecrTo(m.Index, m.RejectHint) {
				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
				if pr.State == ProgressStateReplicate {
					pr.becomeProbe()
				}
				r.sendAppend(m.From)
			}
		} else {
			oldPaused := pr.IsPaused()
			if pr.maybeUpdate(m.Index) {
				switch {
				case pr.State == ProgressStateProbe:
					pr.becomeReplicate()
				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
					pr.becomeProbe()
				case pr.State == ProgressStateReplicate:
					pr.ins.freeTo(m.Index)
				}

				if r.maybeCommit() {
					r.bcastAppend()
				} else if oldPaused {
					// update() reset the wait state on this node. If we had delayed sending
					// an update before, send it now.
					r.sendAppend(m.From)
				}
				// Transfer leadership is in progress.
				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
					r.sendTimeoutNow(m.From)
				}
			}
		}
	case pb.MsgHeartbeatResp:
		pr.RecentActive = true
		pr.resume()

		// free one slot for the full inflights window to allow progress.
		if pr.State == ProgressStateReplicate && pr.ins.full() {
			pr.ins.freeFirstOne()
		}
		if pr.Match < r.raftLog.lastIndex() {
			r.sendAppend(m.From)
		}

		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
			return nil
		}

		ackCount := r.readOnly.recvAck(m)
		if ackCount < r.quorum() {
			return nil
		}

		rss := r.readOnly.advance(m)
		for _, rs := range rss {
			req := rs.req
			if req.From == None || req.From == r.id { // from local member
				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
			} else {
				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
			}
		}
	case pb.MsgSnapStatus:
		if pr.State != ProgressStateSnapshot {
			return nil
		}
		if !m.Reject {
			pr.becomeProbe()
			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
		} else {
			pr.snapshotFailure()
			pr.becomeProbe()
			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
		}
		// If snapshot finish, wait for the msgAppResp from the remote node before sending
		// out the next msgApp.
		// If snapshot failure, wait for a heartbeat interval before next try
		pr.pause()
	case pb.MsgUnreachable:
		// During optimistic replication, if the remote becomes unreachable,
		// there is huge probability that a MsgApp is lost.
		if pr.State == ProgressStateReplicate {
			pr.becomeProbe()
		}
		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
	case pb.MsgTransferLeader:
		if pr.IsLearner {
			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
			return nil
		}
		leadTransferee := m.From
		lastLeadTransferee := r.leadTransferee
		if lastLeadTransferee != None {
			if lastLeadTransferee == leadTransferee {
				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
					r.id, r.Term, leadTransferee, leadTransferee)
				return nil
			}
			r.abortLeaderTransfer()
			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
		}
		if leadTransferee == r.id {
			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
			return nil
		}
		// Transfer leadership to third party.
		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
		r.electionElapsed = 0
		r.leadTransferee = leadTransferee
		if pr.Match == r.raftLog.lastIndex() {
			r.sendTimeoutNow(leadTransferee)
			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
		} else {
			r.sendAppend(leadTransferee)
		}
	}
	return nil
}

// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
// whether they respond to MsgVoteResp or MsgPreVoteResp.
func stepCandidate(r *raft, m pb.Message) error {
	// Only handle vote responses corresponding to our candidacy (while in
	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
	// our pre-candidate state).
	var myVoteRespType pb.MessageType
	if r.state == StatePreCandidate {
		myVoteRespType = pb.MsgPreVoteResp
	} else {
		myVoteRespType = pb.MsgVoteResp
	}
	switch m.Type {
	case pb.MsgProp:
		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
		return ErrProposalDropped
	case pb.MsgApp:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleAppendEntries(m)
	case pb.MsgHeartbeat:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleHeartbeat(m)
	case pb.MsgSnap:
		r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
		r.handleSnapshot(m)
	case myVoteRespType:
		gr := r.poll(m.From, m.Type, !m.Reject)
		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
		switch r.quorum() {
		case gr:
			// Won: a pre-candidate starts the real election; a candidate becomes leader.
			if r.state == StatePreCandidate {
				r.campaign(campaignElection)
			} else {
				r.becomeLeader()
				r.bcastAppend()
			}
		case len(r.votes) - gr:
			// A quorum of rejections: step back down.
			// pb.MsgPreVoteResp contains future term of pre-candidate
			// m.Term > r.Term; reuse r.Term
			r.becomeFollower(r.Term, None)
		}
	case pb.MsgTimeoutNow:
		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
	}
	return nil
}

// stepFollower handles messages while in StateFollower, forwarding proposals
// and read-index requests to the known leader.
func stepFollower(r *raft, m pb.Message) error {
	switch m.Type {
	case pb.MsgProp:
		if r.lead == None {
			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
			return ErrProposalDropped
		} else if r.disableProposalForwarding {
			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
			return ErrProposalDropped
		}
		m.To = r.lead
		r.send(m)
	case pb.MsgApp:
		r.electionElapsed = 0
		r.lead = m.From
		r.handleAppendEntries(m)
	case pb.MsgHeartbeat:
		r.electionElapsed = 0
		r.lead = m.From
		r.handleHeartbeat(m)
	case pb.MsgSnap:
		r.electionElapsed = 0
		r.lead = m.From
		r.handleSnapshot(m)
	case pb.MsgTransferLeader:
		if r.lead == None {
			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
			return nil
		}
		m.To = r.lead
		r.send(m)
	case pb.MsgTimeoutNow:
		if r.promotable() {
			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
			// Leadership transfers never use pre-vote even if r.preVote is true; we
			// know we are not recovering from a partition so there is no need for the
			// extra round trip.
			r.campaign(campaignTransfer)
		} else {
			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
		}
	case pb.MsgReadIndex:
		if r.lead == None {
			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
			return nil
		}
		m.To = r.lead
		r.send(m)
	case pb.MsgReadIndexResp:
		if len(m.Entries) != 1 {
			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
			return nil
		}
		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
	}
	return nil
}

// handleAppendEntries appends the entries carried by m to the local log,
// replying with the resulting last index or a rejection hint.
func (r *raft) handleAppendEntries(m pb.Message) {
	if m.Index < r.raftLog.committed {
		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
		return
	}

	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
	} else {
		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
	}
}

// handleHeartbeat advances the commit index and acks the heartbeat.
func (r *raft) handleHeartbeat(m pb.Message) {
	r.raftLog.commitTo(m.Commit)
	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
}

// handleSnapshot attempts to restore the snapshot carried by m, acking with
// either the new last index or the current commit index.
func (r *raft) handleSnapshot(m pb.Message) {
	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
	if r.restore(m.Snapshot) {
		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
			r.id, r.raftLog.committed, sindex, sterm)
		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
	} else {
		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
			r.id, r.raftLog.committed, sindex, sterm)
		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
	}
}

// restore recovers the state machine from a snapshot. It restores the log and the
// configuration of state machine.
func (r *raft) restore(s pb.Snapshot) bool {
	if s.Metadata.Index <= r.raftLog.committed {
		return false
	}
	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
		r.raftLog.commitTo(s.Metadata.Index)
		return false
	}

	// The normal peer can't become learner.
	if !r.isLearner {
		for _, id := range s.Metadata.ConfState.Learners {
			if id == r.id {
				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
				return false
			}
		}
	}

	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)

	r.raftLog.restore(s)
	r.prs = make(map[uint64]*Progress)
	r.learnerPrs = make(map[uint64]*Progress)
	r.restoreNode(s.Metadata.ConfState.Nodes, false)
	r.restoreNode(s.Metadata.ConfState.Learners, true)
	return true
}

// restoreNode rebuilds progress entries for the given node IDs after a
// snapshot restore.
func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
	for _, n := range nodes {
		match, next := uint64(0), r.raftLog.lastIndex()+1
		if n == r.id {
			match = next - 1
			r.isLearner = isLearner
		}
		r.setProgress(n, match, next, isLearner)
		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
	}
}

// promotable indicates whether state machine can be promoted to leader,
// which is true when its own id is in progress list.
func (r *raft) promotable() bool {
	_, ok := r.prs[r.id]
	return ok
}

// addNode adds id as a voting member.
func (r *raft) addNode(id uint64) {
	r.addNodeOrLearnerNode(id, false)
}

// addLearner adds id as a non-voting learner.
func (r *raft) addLearner(id uint64) {
	r.addNodeOrLearnerNode(id, true)
}

// addNodeOrLearnerNode creates (or promotes) the progress entry for id.
func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
	pr := r.getProgress(id)
	if pr == nil {
		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
	} else {
		if isLearner && !pr.IsLearner {
			// can only change Learner to Voter
			r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
			return
		}

		if isLearner == pr.IsLearner {
			// Ignore any redundant addNode calls (which can happen because the
			// initial bootstrapping entries are applied twice).
			return
		}

		// change Learner to Voter, use origin Learner progress
		delete(r.learnerPrs, id)
		pr.IsLearner = false
		r.prs[id] = pr
	}

	if r.id == id {
		r.isLearner = isLearner
	}

	// When a node is first added, we should mark it as recently active.
	// Otherwise, CheckQuorum may cause us to step down if it is invoked
	// before the added node has a chance to communicate with us.
	pr = r.getProgress(id)
	pr.RecentActive = true
}

// removeNode removes id from the cluster membership.
func (r *raft) removeNode(id uint64) {
	r.delProgress(id)

	// do not try to commit or abort transferring if there is no nodes in the cluster.
	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
		return
	}

	// The quorum size is now smaller, so see if any pending entries can
	// be committed.
	if r.maybeCommit() {
		r.bcastAppend()
	}
	// If the removed node is the leadTransferee, then abort the leadership transferring.
	if r.state == StateLeader && r.leadTransferee == id {
		r.abortLeaderTransfer()
	}
}

// setProgress (re)creates the progress entry for id as a voter or learner.
func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
	if !isLearner {
		delete(r.learnerPrs, id)
		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
		return
	}

	if _, ok := r.prs[id]; ok {
		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
	}
	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
}

// delProgress removes id from both the voter and learner progress maps.
func (r *raft) delProgress(id uint64) {
	delete(r.prs, id)
	delete(r.learnerPrs, id)
}

// loadState initializes term, vote and commit index from a persisted HardState.
func (r *raft) loadState(state pb.HardState) {
	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
	}
	r.raftLog.committed = state.Commit
	r.Term = state.Term
	r.Vote = state.Vote
}

// pastElectionTimeout returns true iff r.electionElapsed is greater
// than or equal to the randomized election timeout in
// [electiontimeout, 2 * electiontimeout - 1].
func (r *raft) pastElectionTimeout() bool {
	return r.electionElapsed >= r.randomizedElectionTimeout
}

// resetRandomizedElectionTimeout re-draws the election timeout uniformly from
// [electiontimeout, 2 * electiontimeout - 1].
func (r *raft) resetRandomizedElectionTimeout() {
	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
}

// checkQuorumActive returns true if the quorum is active from
// the view of the local raft state machine. Otherwise, it returns
// false.
// checkQuorumActive also resets all RecentActive to false.
func (r *raft) checkQuorumActive() bool {
	var act int

	r.forEachProgress(func(id uint64, pr *Progress) {
		if id == r.id { // self is always active
			act++
			return
		}

		if pr.RecentActive && !pr.IsLearner {
			act++
		}

		pr.RecentActive = false
	})

	return act >= r.quorum()
}

// sendTimeoutNow asks the peer to start an immediate election.
func (r *raft) sendTimeoutNow(to uint64) {
	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
}

// abortLeaderTransfer clears any in-flight leadership transfer target.
func (r *raft) abortLeaderTransfer() {
	r.leadTransferee = None
}

// numOfPendingConf counts EntryConfChange entries in ents.
func numOfPendingConf(ents []pb.Entry) int {
	n := 0
	for i := range ents {
		if ents[i].Type == pb.EntryConfChange {
			n++
		}
	}
	return n
}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
new file mode 100644
index 00000000..fd9ee372
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
@@ -0,0 +1,2004 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+/*
+	Package raftpb is a generated protocol buffer package.
+
+	It is generated from these files:
+		raft.proto
+
+	It has these top-level messages:
+		Entry
+		SnapshotMetadata
+		Snapshot
+		Message
+		HardState
+		ConfState
+		ConfChange
+*/
+package raftpb
+
+import (
+	"fmt"
+
+	proto "github.com/golang/protobuf/proto"
+
+	math "math"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+
+	io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: 
"MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To 
uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return 
fileDescriptorRaft, []int{5} } + +type ConfChange struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if 
m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = 
encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, num := range m.Nodes { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) 
Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + n += 1 + sovRaft(uint64(e)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { 
+ return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + 
var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 815 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38, + 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b, + 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20, + 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3, + 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xef, 0xab, 0xae, 0xfa, 0xea, 0xeb, 0x9a, 0x01, 0x50, 0x62, 0xa9, + 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f, + 0x9b, 0x53, 0xc1, 0x4e, 0x7e, 0x83, 0xd6, 0xb7, 0xa9, 0x56, 0x77, 0xf8, 0x19, 0x04, 0x57, 0x77, + 0x19, 0x71, 0x6f, 0xec, 0x4d, 0x07, 0xc7, 0x7b, 0x47, 0xc5, 0xad, 0x23, 0x4b, 0x1a, 0xe2, 0x24, + 0xb8, 0xff, 0xe7, 0x93, 0xc6, 0xdc, 0x26, 0x21, 0x87, 0xe0, 0x8a, 0x54, 0xc2, 0xfd, 0xb1, 0x37, + 0x0d, 0x36, 0x0c, 0xa9, 0x04, 0xf7, 0xa1, 
0x75, 0x91, 0x46, 0xf4, 0x8e, 0x37, 0x2b, 0x54, 0x01, + 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, 0x03, + 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, 0x42, + 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xa1, 0x28, 0xdc, 0x2a, 0x3a, 0x95, 0xe9, 0xf2, 0xb5, 0x21, + 0x5c, 0xf1, 0xde, 0xa2, 0x04, 0x4c, 0xf3, 0x95, 0x6d, 0x5e, 0xd5, 0x55, 0x40, 0x46, 0xb2, 0x36, + 0x92, 0xab, 0xba, 0x2c, 0x32, 0xf9, 0x01, 0xba, 0xa5, 0x02, 0x23, 0xd1, 0x28, 0xb0, 0x3d, 0xfb, + 0x73, 0x7b, 0xc6, 0xaf, 0xa1, 0x9b, 0x38, 0x65, 0xb6, 0x70, 0x78, 0xcc, 0x4b, 0x2d, 0x8f, 0x95, + 0xbb, 0xba, 0x9b, 0xfc, 0xc9, 0x5f, 0x4d, 0xe8, 0xcc, 0x28, 0xcf, 0x45, 0x4c, 0xf8, 0x1c, 0x02, + 0xbd, 0x75, 0xf8, 0x59, 0x59, 0xc3, 0xd1, 0x55, 0x8f, 0x4d, 0x1a, 0x0e, 0xc1, 0xd7, 0xb2, 0x36, + 0x89, 0xaf, 0xa5, 0x19, 0x63, 0xa9, 0xe4, 0xa3, 0x31, 0x0c, 0xb2, 0x19, 0x30, 0x78, 0x3c, 0x20, + 0x8e, 0xa0, 0x73, 0x2b, 0x63, 0xfb, 0x60, 0xad, 0x0a, 0x59, 0x82, 0x5b, 0xdb, 0xda, 0x4f, 0x6d, + 0x7b, 0x0e, 0x1d, 0x4a, 0xb5, 0x5a, 0x51, 0xce, 0x3b, 0xe3, 0xe6, 0x34, 0x3c, 0xde, 0xa9, 0x6d, + 0x46, 0x59, 0xca, 0xe5, 0xe0, 0x01, 0xb4, 0x17, 0x32, 0x49, 0x56, 0x9a, 0x77, 0x2b, 0xb5, 0x1c, + 0x86, 0xc7, 0xd0, 0xcd, 0x9d, 0x63, 0xbc, 0x67, 0x9d, 0x64, 0x8f, 0x9d, 0x2c, 0x1d, 0x2c, 0xf3, + 0x4c, 0x45, 0x45, 0x3f, 0xd3, 0x42, 0x73, 0x18, 0x7b, 0xd3, 0x6e, 0x59, 0xb1, 0xc0, 0xf0, 0x53, + 0x80, 0xe2, 0x74, 0xbe, 0x4a, 0x35, 0x0f, 0x2b, 0x3d, 0x2b, 0x38, 0x72, 0xe8, 0x2c, 0x64, 0xaa, + 0xe9, 0x9d, 0xe6, 0x7d, 0xfb, 0xb0, 0x65, 0x38, 0xf9, 0x11, 0x7a, 0xe7, 0x42, 0x45, 0xc5, 0xfa, + 0x94, 0x0e, 0x7a, 0x4f, 0x1c, 0xe4, 0x10, 0xbc, 0x95, 0x9a, 0xea, 0xfb, 0x6e, 0x90, 0xca, 0xc0, + 0xcd, 0xa7, 0x03, 0x4f, 0xbe, 0x81, 0xde, 0x66, 0x5d, 0x71, 0x08, 0xad, 0x54, 0x46, 0x94, 0x73, + 0x6f, 0xdc, 0x9c, 0x06, 0xf3, 0x22, 0xc0, 0x7d, 0xe8, 0xde, 0x92, 0x50, 0x29, 0xa9, 0x9c, 0xfb, + 0x96, 0xd8, 0xc4, 0x93, 0x3f, 0x3c, 0x00, 0x73, 0xff, 0xf4, 0x46, 0xa4, 0xb1, 
0xdd, 0x88, 0x8b, + 0xb3, 0x9a, 0x3a, 0xff, 0xe2, 0x0c, 0xbf, 0x70, 0x1f, 0xae, 0x6f, 0xd7, 0xea, 0xe3, 0xea, 0x67, + 0x52, 0xdc, 0x7b, 0xf2, 0xf5, 0x1e, 0x40, 0xfb, 0x52, 0x46, 0x74, 0x71, 0x56, 0xd7, 0x5c, 0x60, + 0xc6, 0xac, 0x53, 0x67, 0x56, 0xf1, 0xa1, 0x96, 0xe1, 0xe1, 0x97, 0xd0, 0xdb, 0xfc, 0x0e, 0x70, + 0x17, 0x42, 0x1b, 0x5c, 0x4a, 0x95, 0x88, 0x5b, 0xd6, 0xc0, 0x67, 0xb0, 0x6b, 0x81, 0x6d, 0x63, + 0xe6, 0x1d, 0xfe, 0xed, 0x43, 0x58, 0x59, 0x70, 0x04, 0x68, 0xcf, 0xf2, 0xf8, 0x7c, 0x9d, 0xb1, + 0x06, 0x86, 0xd0, 0x99, 0xe5, 0xf1, 0x09, 0x09, 0xcd, 0x3c, 0x17, 0xbc, 0x52, 0x32, 0x63, 0xbe, + 0xcb, 0x7a, 0x91, 0x65, 0xac, 0x89, 0x03, 0x80, 0xe2, 0x3c, 0xa7, 0x3c, 0x63, 0x81, 0x4b, 0xfc, + 0x5e, 0x6a, 0x62, 0x2d, 0x23, 0xc2, 0x05, 0x96, 0x6d, 0x3b, 0xd6, 0x2c, 0x13, 0xeb, 0x20, 0x83, + 0xbe, 0x69, 0x46, 0x42, 0xe9, 0x6b, 0xd3, 0xa5, 0x8b, 0x43, 0x60, 0x55, 0xc4, 0x5e, 0xea, 0x21, + 0xc2, 0x60, 0x96, 0xc7, 0x6f, 0x52, 0x45, 0x62, 0x71, 0x23, 0xae, 0x6f, 0x89, 0x01, 0xee, 0xc1, + 0x8e, 0x2b, 0x64, 0x1e, 0x6f, 0x9d, 0xb3, 0xd0, 0xa5, 0x9d, 0xde, 0xd0, 0xe2, 0x97, 0xef, 0xd6, + 0x52, 0xad, 0x13, 0xd6, 0xc7, 0x8f, 0x60, 0x6f, 0x96, 0xc7, 0x57, 0x4a, 0xa4, 0xf9, 0x92, 0xd4, + 0x4b, 0x12, 0x11, 0x29, 0xb6, 0xe3, 0x6e, 0x5f, 0xad, 0x12, 0x92, 0x6b, 0x7d, 0x29, 0x7f, 0x65, + 0x03, 0x27, 0x66, 0x4e, 0x22, 0xb2, 0x3f, 0x43, 0xb6, 0xeb, 0xc4, 0x6c, 0x10, 0x2b, 0x86, 0xb9, + 0x79, 0x5f, 0x29, 0xb2, 0x23, 0xee, 0xb9, 0xae, 0x2e, 0xb6, 0x39, 0x78, 0x78, 0x07, 0x83, 0xfa, + 0xf3, 0x1a, 0x1d, 0x5b, 0xe4, 0x45, 0x14, 0x99, 0xb7, 0x64, 0x0d, 0xe4, 0x30, 0xdc, 0xc2, 0x73, + 0x4a, 0xe4, 0x5b, 0xb2, 0x8c, 0x57, 0x67, 0xde, 0x64, 0x91, 0xd0, 0x05, 0xe3, 0xe3, 0x01, 0xf0, + 0x5a, 0xa9, 0x97, 0xc5, 0x36, 0x5a, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, 0x8c, + 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, 0xa8, + 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0x52, 0x5b, 0xe0, 0x74, 0x06, 0x00, 0x00, +} diff --git 
a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go new file mode 100644 index 00000000..fbd7a49e --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/rawnode.go @@ -0,0 +1,264 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// ErrStepLocalMsg is returned when try to step a local raft message +var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") + +// ErrStepPeerNotFound is returned when try to step a response message +// but there is no peer found in raft.prs for that node. +var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") + +// RawNode is a thread-unsafe Node. +// The methods of this struct correspond to the methods of Node and are described +// more fully there. +type RawNode struct { + raft *raft + prevSoftSt *SoftState + prevHardSt pb.HardState +} + +func (rn *RawNode) newReady() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +func (rn *RawNode) commitReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + if rn.prevHardSt.Commit != 0 { + // In most cases, prevHardSt and rd.HardState will be the same + // because when there are new entries to apply we just sent a + // HardState with an updated Commit value. 
However, on initial + // startup the two are different because we don't send a HardState + // until something changes, but we do send any un-applied but + // committed entries (and previously-committed entries may be + // incorporated into the snapshot, even if rd.CommittedEntries is + // empty). Therefore we mark all committed entries as applied + // whether they were included in rd.HardState or not. + rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) + } + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + rn.raft.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } +} + +// NewRawNode returns a new RawNode given configuration and a list of raft peers. +func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { + if config.ID == 0 { + panic("config.ID must not be zero") + } + r := newRaft(config) + rn := &RawNode{ + raft: r, + } + lastIndex, err := config.Storage.LastIndex() + if err != nil { + panic(err) // TODO(bdarnell) + } + // If the log is empty, this is a new RawNode (like StartNode); otherwise it's + // restoring an existing RawNode (like RestartNode). + // TODO(bdarnell): rethink RawNode initialization and whether the application needs + // to be able to tell us when it expects the RawNode to exist. + if lastIndex == 0 { + r.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + panic("unexpected marshal error") + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + r.raftLog.append(ents...) + r.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + r.addNode(peer.ID) + } + } + + // Set the initial hard and soft states after performing all initialization. 
+ rn.prevSoftSt = r.softState() + if lastIndex == 0 { + rn.prevHardSt = emptyState + } else { + rn.prevHardSt = r.hardState() + } + + return rn, nil +} + +// Tick advances the internal logical clock by a single tick. +func (rn *RawNode) Tick() { + rn.raft.tick() +} + +// TickQuiesced advances the internal logical clock by a single tick without +// performing any other state machine processing. It allows the caller to avoid +// periodic heartbeats and elections when all of the peers in a Raft group are +// known to be at the same state. Expected usage is to periodically invoke Tick +// or TickQuiesced depending on whether the group is "active" or "quiesced". +// +// WARNING: Be very careful about using this method as it subverts the Raft +// state machine. You should probably be using Tick instead. +func (rn *RawNode) TickQuiesced() { + rn.raft.electionElapsed++ +} + +// Campaign causes this RawNode to transition to candidate state. +func (rn *RawNode) Campaign() error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgHup, + }) +} + +// Propose proposes data be appended to the raft log. +func (rn *RawNode) Propose(data []byte) error { + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + From: rn.raft.id, + Entries: []pb.Entry{ + {Data: data}, + }}) +} + +// ProposeConfChange proposes a config change. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { + data, err := cc.Marshal() + if err != nil { + return err + } + return rn.raft.Step(pb.Message{ + Type: pb.MsgProp, + Entries: []pb.Entry{ + {Type: pb.EntryConfChange, Data: data}, + }, + }) +} + +// ApplyConfChange applies a config change to the local node. 
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { + if cc.NodeID == None { + return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()} + } + switch cc.Type { + case pb.ConfChangeAddNode: + rn.raft.addNode(cc.NodeID) + case pb.ConfChangeAddLearnerNode: + rn.raft.addLearner(cc.NodeID) + case pb.ConfChangeRemoveNode: + rn.raft.removeNode(cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + panic("unexpected conf type") + } + return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()} +} + +// Step advances the state machine using the given message. +func (rn *RawNode) Step(m pb.Message) error { + // ignore unexpected local messages receiving over network + if IsLocalMsg(m.Type) { + return ErrStepLocalMsg + } + if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { + return rn.raft.Step(m) + } + return ErrStepPeerNotFound +} + +// Ready returns the current point-in-time state of this RawNode. +func (rn *RawNode) Ready() Ready { + rd := rn.newReady() + rn.raft.msgs = nil + return rd +} + +// HasReady called when RawNode user need to check if any Ready pending. +// Checking logic in this method should be consistent with Ready.containsUpdates(). +func (rn *RawNode) HasReady() bool { + r := rn.raft + if !r.softState().equal(rn.prevSoftSt) { + return true + } + if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { + return true + } + if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { + return true + } + if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { + return true + } + if len(r.readStates) != 0 { + return true + } + return false +} + +// Advance notifies the RawNode that the application has applied and saved progress in the +// last Ready results. +func (rn *RawNode) Advance(rd Ready) { + rn.commitReady(rd) +} + +// Status returns the current status of the given group. 
+func (rn *RawNode) Status() *Status { + status := getStatus(rn.raft) + return &status +} + +// ReportUnreachable reports the given node is not reachable for the last send. +func (rn *RawNode) ReportUnreachable(id uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) +} + +// ReportSnapshot reports the status of the sent snapshot. +func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { + rej := status == SnapshotFailure + + _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) +} + +// TransferLeader tries to transfer leadership to the given transferee. +func (rn *RawNode) TransferLeader(transferee uint64) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) +} + +// ReadIndex requests a read state. The read state will be set in ready. +// Read State has a read index. Once the application advances further than the read +// index, any linearizable read requests issued before the read request can be +// processed safely. The read state will have the same rctx attached. +func (rn *RawNode) ReadIndex(rctx []byte) { + _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) +} diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go new file mode 100644 index 00000000..ae746fa7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/read_only.go @@ -0,0 +1,118 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import pb "github.com/coreos/etcd/raft/raftpb" + +// ReadState provides state for read only query. +// It's caller's responsibility to call ReadIndex first before getting +// this state from ready, it's also caller's duty to differentiate if this +// state is what it requests through RequestCtx, eg. given a unique id as +// RequestCtx +type ReadState struct { + Index uint64 + RequestCtx []byte +} + +type readIndexStatus struct { + req pb.Message + index uint64 + acks map[uint64]struct{} +} + +type readOnly struct { + option ReadOnlyOption + pendingReadIndex map[string]*readIndexStatus + readIndexQueue []string +} + +func newReadOnly(option ReadOnlyOption) *readOnly { + return &readOnly{ + option: option, + pendingReadIndex: make(map[string]*readIndexStatus), + } +} + +// addRequest adds a read only reuqest into readonly struct. +// `index` is the commit index of the raft state machine when it received +// the read only request. +// `m` is the original read only request message from the local or remote node. +func (ro *readOnly) addRequest(index uint64, m pb.Message) { + ctx := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[ctx]; ok { + return + } + ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})} + ro.readIndexQueue = append(ro.readIndexQueue, ctx) +} + +// recvAck notifies the readonly struct that the raft state machine received +// an acknowledgment of the heartbeat that attached with the read only request +// context. +func (ro *readOnly) recvAck(m pb.Message) int { + rs, ok := ro.pendingReadIndex[string(m.Context)] + if !ok { + return 0 + } + + rs.acks[m.From] = struct{}{} + // add one to include an ack from local node + return len(rs.acks) + 1 +} + +// advance advances the read only request queue kept by the readonly struct. 
+// It dequeues the requests until it finds the read only request that has +// the same context as the given `m`. +func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { + var ( + i int + found bool + ) + + ctx := string(m.Context) + rss := []*readIndexStatus{} + + for _, okctx := range ro.readIndexQueue { + i++ + rs, ok := ro.pendingReadIndex[okctx] + if !ok { + panic("cannot find corresponding read state from pending map") + } + rss = append(rss, rs) + if okctx == ctx { + found = true + break + } + } + + if found { + ro.readIndexQueue = ro.readIndexQueue[i:] + for _, rs := range rss { + delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) + } + return rss + } + + return nil +} + +// lastPendingRequestCtx returns the context of the last pending read only +// request in readonly struct. +func (ro *readOnly) lastPendingRequestCtx() string { + if len(ro.readIndexQueue) == 0 { + return "" + } + return ro.readIndexQueue[len(ro.readIndexQueue)-1] +} diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go new file mode 100644 index 00000000..f4d3d86a --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/status.go @@ -0,0 +1,88 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +type Status struct { + ID uint64 + + pb.HardState + SoftState + + Applied uint64 + Progress map[uint64]Progress + + LeadTransferee uint64 +} + +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + s := Status{ + ID: r.id, + LeadTransferee: r.leadTransferee, + } + + s.HardState = r.hardState() + s.SoftState = *r.softState() + + s.Applied = r.raftLog.applied + + if s.RaftState == StateLeader { + s.Progress = make(map[uint64]Progress) + for id, p := range r.prs { + s.Progress[id] = *p + } + + for id, p := range r.learnerPrs { + s.Progress[id] = *p + } + } + + return s +} + +// MarshalJSON translates the raft status into JSON. +// TODO: try to simplify this by introducing ID type into raft +func (s Status) MarshalJSON() ([]byte, error) { + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied) + + if len(s.Progress) == 0 { + j += "}," + } else { + for k, v := range s.Progress { + subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) + j += subj + } + // remove the trailing "," + j = j[:len(j)-1] + "}," + } + + j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee) + return []byte(j), nil +} + +func (s Status) String() string { + b, err := s.MarshalJSON() + if err != nil { + raftLogger.Panicf("unexpected error: %v", err) + } + return string(b) +} diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go new file mode 100644 index 00000000..69c3a7d9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/storage.go @@ -0,0 +1,271 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raft + +import ( + "errors" + "sync" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +// ErrCompacted is returned by Storage.Entries/Compact when a requested +// index is unavailable because it predates the last snapshot. +var ErrCompacted = errors.New("requested index is unavailable due to compaction") + +// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested +// index is older than the existing snapshot. +var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") + +// ErrUnavailable is returned by Storage interface when the requested log entries +// are unavailable. +var ErrUnavailable = errors.New("requested entry at index is unavailable") + +// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required +// snapshot is temporarily unavailable. +var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") + +// Storage is an interface that may be implemented by the application +// to retrieve log entries from storage. +// +// If any Storage method returns an error, the raft instance will +// become inoperable and refuse to participate in elections; the +// application is responsible for cleanup and recovery in this case. +type Storage interface { + // InitialState returns the saved HardState and ConfState information. + InitialState() (pb.HardState, pb.ConfState, error) + // Entries returns a slice of log entries in the range [lo,hi). 
+ // MaxSize limits the total size of the log entries returned, but + // Entries returns at least one entry if any. + Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) + // Term returns the term of entry i, which must be in the range + // [FirstIndex()-1, LastIndex()]. The term of the entry before + // FirstIndex is retained for matching purposes even though the + // rest of that entry may not be available. + Term(i uint64) (uint64, error) + // LastIndex returns the index of the last entry in the log. + LastIndex() (uint64, error) + // FirstIndex returns the index of the first log entry that is + // possibly available via Entries (older entries have been incorporated + // into the latest Snapshot; if storage only contains the dummy entry the + // first log entry is not available). + FirstIndex() (uint64, error) + // Snapshot returns the most recent snapshot. + // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, + // so raft state machine could know that Storage needs some time to prepare + // snapshot and call Snapshot later. + Snapshot() (pb.Snapshot, error) +} + +// MemoryStorage implements the Storage interface backed by an +// in-memory array. +type MemoryStorage struct { + // Protects access to all fields. Most methods of MemoryStorage are + // run on the raft goroutine, but Append() is run on an application + // goroutine. + sync.Mutex + + hardState pb.HardState + snapshot pb.Snapshot + // ents[i] has raft log position i+snapshot.Metadata.Index + ents []pb.Entry +} + +// NewMemoryStorage creates an empty MemoryStorage. +func NewMemoryStorage() *MemoryStorage { + return &MemoryStorage{ + // When starting from scratch populate the list with a dummy entry at term zero. + ents: make([]pb.Entry, 1), + } +} + +// InitialState implements the Storage interface. 
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { + return ms.hardState, ms.snapshot.Metadata.ConfState, nil +} + +// SetHardState saves the current HardState. +func (ms *MemoryStorage) SetHardState(st pb.HardState) error { + ms.Lock() + defer ms.Unlock() + ms.hardState = st + return nil +} + +// Entries implements the Storage interface. +func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if lo <= offset { + return nil, ErrCompacted + } + if hi > ms.lastIndex()+1 { + raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) + } + // only contains dummy entries. + if len(ms.ents) == 1 { + return nil, ErrUnavailable + } + + ents := ms.ents[lo-offset : hi-offset] + return limitSize(ents, maxSize), nil +} + +// Term implements the Storage interface. +func (ms *MemoryStorage) Term(i uint64) (uint64, error) { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if i < offset { + return 0, ErrCompacted + } + if int(i-offset) >= len(ms.ents) { + return 0, ErrUnavailable + } + return ms.ents[i-offset].Term, nil +} + +// LastIndex implements the Storage interface. +func (ms *MemoryStorage) LastIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.lastIndex(), nil +} + +func (ms *MemoryStorage) lastIndex() uint64 { + return ms.ents[0].Index + uint64(len(ms.ents)) - 1 +} + +// FirstIndex implements the Storage interface. +func (ms *MemoryStorage) FirstIndex() (uint64, error) { + ms.Lock() + defer ms.Unlock() + return ms.firstIndex(), nil +} + +func (ms *MemoryStorage) firstIndex() uint64 { + return ms.ents[0].Index + 1 +} + +// Snapshot implements the Storage interface. +func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + return ms.snapshot, nil +} + +// ApplySnapshot overwrites the contents of this Storage object with +// those of the given snapshot. 
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { + ms.Lock() + defer ms.Unlock() + + //handle check for old snapshot being applied + msIndex := ms.snapshot.Metadata.Index + snapIndex := snap.Metadata.Index + if msIndex >= snapIndex { + return ErrSnapOutOfDate + } + + ms.snapshot = snap + ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} + return nil +} + +// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and +// can be used to reconstruct the state at that point. +// If any configuration changes have been made since the last compaction, +// the result of the last ApplyConfChange must be passed in. +func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { + ms.Lock() + defer ms.Unlock() + if i <= ms.snapshot.Metadata.Index { + return pb.Snapshot{}, ErrSnapOutOfDate + } + + offset := ms.ents[0].Index + if i > ms.lastIndex() { + raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) + } + + ms.snapshot.Metadata.Index = i + ms.snapshot.Metadata.Term = ms.ents[i-offset].Term + if cs != nil { + ms.snapshot.Metadata.ConfState = *cs + } + ms.snapshot.Data = data + return ms.snapshot, nil +} + +// Compact discards all log entries prior to compactIndex. +// It is the application's responsibility to not attempt to compact an index +// greater than raftLog.applied. +func (ms *MemoryStorage) Compact(compactIndex uint64) error { + ms.Lock() + defer ms.Unlock() + offset := ms.ents[0].Index + if compactIndex <= offset { + return ErrCompacted + } + if compactIndex > ms.lastIndex() { + raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) + } + + i := compactIndex - offset + ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) + ents[0].Index = ms.ents[i].Index + ents[0].Term = ms.ents[i].Term + ents = append(ents, ms.ents[i+1:]...) + ms.ents = ents + return nil +} + +// Append the new entries to storage. 
+// TODO (xiangli): ensure the entries are continuous and +// entries[0].Index > ms.entries[0].Index +func (ms *MemoryStorage) Append(entries []pb.Entry) error { + if len(entries) == 0 { + return nil + } + + ms.Lock() + defer ms.Unlock() + + first := ms.firstIndex() + last := entries[0].Index + uint64(len(entries)) - 1 + + // shortcut if there is no new entry. + if last < first { + return nil + } + // truncate compacted entries + if first > entries[0].Index { + entries = entries[first-entries[0].Index:] + } + + offset := entries[0].Index - ms.ents[0].Index + switch { + case uint64(len(ms.ents)) > offset: + ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) + ms.ents = append(ms.ents, entries...) + case uint64(len(ms.ents)) == offset: + ms.ents = append(ms.ents, entries...) + default: + raftLogger.Panicf("missing log entry [last: %d, append at: %d]", + ms.lastIndex(), entries[0].Index) + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go new file mode 100644 index 00000000..f4141fe6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/util.go @@ -0,0 +1,129 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "bytes" + "fmt" + + pb "github.com/coreos/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +// uint64Slice implements sort interface +type uint64Slice []uint64 + +func (p uint64Slice) Len() int { return len(p) } +func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. 
+func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected") + if m.RejectHint != 0 { + fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) + } + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) + } + return buf.String() +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. +func DescribeEntry(e pb.Entry, f EntryFormatter) string { + var formatted string + if e.Type == pb.EntryNormal && f != nil { + formatted = f(e.Data) + } else { + formatted = fmt.Sprintf("%q", e.Data) + } + return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 00000000..8765c9fb --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 00000000..1db6849f --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. 
+type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. 
+func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. 
+func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. 
+func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. +func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 00000000..ede8136f --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. +func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 00000000..858e0247 --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 00000000..de6ce473 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,401 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Combine(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:") +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. 
That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + _newline = []byte("\n") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, the returned slice is empty. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. 
+ // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. 
+func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. 
+// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. 
+ errors := [2]error{left, right} + return fromSlice(errors[0:]) +} diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 00000000..6652bed4 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 00000000..5be3704a --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. 
+func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. +func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. 
+func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. +func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) 
MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) 
MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 00000000..d15f7fdb --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,106 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. 
Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import "strconv" + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. 
+func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 00000000..8fb3e202 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. 
+func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 00000000..dae13030 --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,243 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. 
+// +// Values configured here are per-second. See zapcore.NewSampler for details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. 
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of paths to write logging output to. See Open for + // details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of paths to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. 
+// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) 
+ if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 00000000..3f16a8d4 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. 
It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Printf("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. 
+// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. 
+package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 00000000..2e9d3c34 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,75 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 00000000..65982a51 --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. 
+func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 00000000..5130e134 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,310 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). 
+func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// String constructs a field with the given key and value. 
+func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. 
+func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace()) +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. 
+func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. +func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case []uint16: + return 
Uint16s(key, val) + case uint8: + return Uint8(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 00000000..13128750 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 00000000..d02232e3 --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 2 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. 
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. 
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 00000000..f171c384 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "fmt" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// GET requests return a JSON description of the current logging level. PUT +// requests change the logging level and expect a payload like: +// {"level":"info"} +// +// It's perfectly safe to change the logging level while a program is running. 
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	type errorResponse struct {
+		Error string `json:"error"`
+	}
+	type payload struct {
+		Level *zapcore.Level `json:"level"`
+	}
+
+	enc := json.NewEncoder(w)
+
+	switch r.Method {
+
+	case "GET":
+		current := lvl.Level()
+		enc.Encode(payload{Level: &current})
+
+	case "PUT":
+		var req payload
+
+		if errmess := func() string {
+			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+				return fmt.Sprintf("Request body must be well-formed JSON: %v", err)
+			}
+			if req.Level == nil {
+				return "Must specify a logging level."
+			}
+			return ""
+		}(); errmess != "" {
+			w.WriteHeader(http.StatusBadRequest)
+			enc.Encode(errorResponse{Error: errmess})
+			return
+		}
+
+		lvl.SetLevel(*req.Level)
+		enc.Encode(req)
+
+	default:
+		w.WriteHeader(http.StatusMethodNotAllowed)
+		enc.Encode(errorResponse{
+			Error: "Only GET and PUT are supported.",
+		})
+	}
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 00000000..dad583aa
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 00000000..c4d5d02a --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 00000000..dfc5b05f --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 00000000..3567a9a1 --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). 
+ FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. 
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 00000000..dc8f6e3a --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,305 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + name string + errorOutput zapcore.WriteSyncer + + addCaller bool + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. 
It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. 
+func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. 
The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. 
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+func (log *Logger) clone() *Logger {
+	copy := *log
+	return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// check must always be called directly by a method in the Logger interface
+	// (e.g., Check, Info, Fatal).
+	const callerSkipOffset = 2
+
+	// Create basic checked entry thru the core; this will be non-nil if the
+	// log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       time.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.Should(ent, zapcore.WriteThenPanic)
+	case zapcore.FatalLevel:
+		ce = ce.Should(ent, zapcore.WriteThenFatal)
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.Should(ent, zapcore.WriteThenPanic)
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput + if log.addCaller { + ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) + if !ce.Entry.Caller.Defined { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + log.errorOutput.Sync() + } + } + if log.addStack.Enabled(ce.Entry.Level) { + ce.Entry.Stack = Stack("").String + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 00000000..7a6b0fca --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "go.uber.org/zap/zapcore" + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. 
+type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller. 
+func AddCaller() Option { + return optionFunc(func(log *Logger) { + log.addCaller = true + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 00000000..100fac21 --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,126 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "strings" + "sync" + + "go.uber.org/zap/internal/bufferpool" +) + +const _zapPackage = "go.uber.org/zap" + +var ( + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } + + // We add "." and "/" suffixes to the package name to ensure we only match + // the exact package and not any package with the same prefix. + _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") + _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) +) + +func takeStacktrace() string { + buffer := bufferpool.Get() + defer buffer.Free() + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var numFrames int + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + numFrames = runtime.Callers(2, programCounters.pcs) + if numFrames < len(programCounters.pcs) { + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + skipZapFrames := true // skip all consecutive zap frames at the beginning. + frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. 
+ for frame, more := frames.Next(); more; frame, more = frames.Next() { + if skipZapFrames && isZapFrame(frame.Function) { + continue + } else { + skipZapFrames = false + } + + if i != 0 { + buffer.AppendByte('\n') + } + i++ + buffer.AppendString(frame.Function) + buffer.AppendByte('\n') + buffer.AppendByte('\t') + buffer.AppendString(frame.File) + buffer.AppendByte(':') + buffer.AppendInt(int64(frame.Line)) + } + + return buffer.String() +} + +func isZapFrame(function string) bool { + for _, prefix := range _zapStacktracePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + + // We can't use a prefix match here since the location of the vendor + // directory affects the prefix. Instead we do a contains match. + for _, contains := range _zapStacktraceVendorContains { + if strings.Contains(function, contains) { + return true + } + } + + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} + +func addPrefix(prefix string, ss ...string) []string { + withPrefix := make([]string, len(ss)) + for i, s := range ss { + withPrefix[i] = prefix + s + } + return withPrefix +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 00000000..77ca227f --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,304 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. 
Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. 
+func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. 
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. 
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + // Format with Sprint, Sprintf, or neither. + msg := template + if msg == "" && len(fmtArgs) > 0 { + msg = fmt.Sprint(fmtArgs...) + } else if msg != "" && len(fmtArgs) > 0 { + msg = fmt.Sprintf(template, fmtArgs...) + } + + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. 
+ key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 00000000..c5a1f162 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 00000000..16f55ce4 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "io/ioutil" + "os" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of paths, opens or +// creates each of the specified files, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no paths returns a no-op WriteSyncer. The special paths "stdout" and +// "stderr" are interpreted as os.Stdout and os.Stderr, respectively. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + var openErr error + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + files := make([]*os.File, 0, len(paths)) + close := func() { + for _, f := range files { + f.Close() + } + } + for _, path := range paths { + switch path { + case "stdout": + writers = append(writers, os.Stdout) + // Don't close standard out. + continue + case "stderr": + writers = append(writers, os.Stderr) + // Don't close standard error. + continue + } + f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) + openErr = multierr.Append(openErr, err) + if err == nil { + writers = append(writers, f) + files = append(files, f) + } + } + + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. 
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 00000000..b7875966 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,147 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. 
+ nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + for i := range arr.elems { + if i > 0 { + line.AppendByte('\t') + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addTabIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer context.buf.Free() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addTabIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendByte('\t') + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 00000000..a1ef8b03 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). 
+ Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 00000000..31000e91 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 00000000..f0509522 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,348 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. 
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. 
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) +} + +// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are +// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to +// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. +func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. 
"string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. 
+func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it's slow + // and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. 
+ AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 00000000..7d9893f3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,257 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. 
+type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. 
+// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. 
+ // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 00000000..a67c7bac --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,120 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... 
+// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) error { + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. + Errors() []error +} + +type causer interface { + // Provides access to the error that caused this error. + Cause() error +} + +// Note that errArry and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + arr.AppendObject(el) + el.Free() + } + return nil +} + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. 
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get().(*errArrayElem) + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 00000000..6a5e33e2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,201 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time. + TimeType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. 
+ UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case 
Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + enc.AddString(f.Key, f.Interface.(fmt.Stringer).String()) + case ErrorType: + encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
+func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 00000000..5db4afb3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 00000000..1006ba2b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,480 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. 
+// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + marshaled, err := json.Marshal(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(marshaled) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + 
enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + marshaled, err := json.Marshal(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(marshaled) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) 
AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. 
+ final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined && final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) 
addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. 
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 00000000..e575c9f4 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,175 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. 
+ switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. 
+// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 00000000..7af8dadc --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 00000000..2627a653 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 00000000..5c46bc13 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) 
} +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 00000000..e3164186 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,134 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 +} + +// NewSampler creates a Core that samples incoming entries, which caps the CPU +// and I/O load of logging while attempting to preserve a representative subset +// of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. 
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + } +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + return ce + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 00000000..07a32eef --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 00000000..209e25fe --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + // Copy to protect against https://github.com/golang/go/issues/7809 + return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +}