mirror of https://github.com/etcd-io/dbtester.git

vendor: upgrade etcd v3.3, Consul

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>

parent 01bb4fe475
commit 860b3283fa
Gopkg.lock

@@ -18,6 +18,7 @@
    "storage"
  ]
  revision = "2a6493c7a9214bf56c1003bd97443d505cc7e952"
  source = "https://github.com/GoogleCloudPlatform/google-cloud-go"

[[projects]]
  branch = "master"

@@ -28,32 +29,24 @@
[[projects]]
  name = "github.com/cheggaaa/pb"
  packages = ["."]
  revision = "d7e6ca3010b6f084d8056847f55d7f572f180678"
  revision = "18d384da9bdc1e5a08fc2a62a494c321d9ae74ea"
  source = "https://github.com/cheggaaa/pb"

[[projects]]
  name = "github.com/coreos/etcd"
  packages = [
    "auth/authpb",
    "client",
    "clientv3",
    "etcdserver/api/v3rpc/rpctypes",
    "etcdserver/etcdserverpb",
    "mvcc/mvccpb",
    "pkg/cpuutil",
    "pkg/netutil",
    "pkg/pathutil",
    "pkg/report",
    "pkg/srv",
    "pkg/types",
    "version"
    "pkg/types"
  ]
  revision = "47a8156851b5a59665421661edb7c813f8a7993e"

[[projects]]
  name = "github.com/coreos/go-semver"
  packages = ["semver"]
  revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
  version = "v0.2.0"
  revision = "eaf6ae02e47edbea90aa7f0fe6c4d558611e00ab"
  source = "https://github.com/coreos/etcd"

[[projects]]
  name = "github.com/coreos/go-systemd"

@@ -64,12 +57,14 @@
[[projects]]
  name = "github.com/coreos/pkg"
  packages = ["capnslog"]
  revision = "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
  revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1"
  source = "https://github.com/coreos/pkg"

[[projects]]
  name = "github.com/dustin/go-humanize"
  packages = ["."]
  revision = "ef638b6c2e62b857442c6443dace9366a48c0ee2"
  revision = "bb3d318650d48840a39aa21a027c6630e198e626"
  source = "https://github.com/dustin/go-humanize"

[[projects]]
  name = "github.com/gogo/protobuf"

@@ -79,6 +74,7 @@
    "protoc-gen-gogo/descriptor"
  ]
  revision = "160de10b2537169b5ae3e7e221d28269ef40d311"
  source = "https://github.com/gogo/protobuf"

[[projects]]
  branch = "master"

@@ -95,9 +91,13 @@
  packages = [
    "proto",
    "protoc-gen-go/descriptor",
    "ptypes/any"
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
    "ptypes/timestamp"
  ]
  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
  source = "https://github.com/golang/protobuf"

[[projects]]
  branch = "master"

@@ -161,6 +161,7 @@
    "vg/vgsvg"
  ]
  revision = "51b62dc5319d7fce41240d13e780a93e640b9a38"
  source = "https://github.com/gonum/plot"

[[projects]]
  name = "github.com/googleapis/gax-go"

@@ -171,7 +172,8 @@
[[projects]]
  name = "github.com/gyuho/dataframe"
  packages = ["."]
  revision = "73de2c550b1177c1640f3dacbbc1af00f913fedb"
  revision = "008fc241adc41d4bd5c54b9f6564ef16407c030e"
  source = "https://github.com/gyuho/dataframe"

[[projects]]
  name = "github.com/gyuho/linux-inspect"

@@ -185,11 +187,13 @@
    "top"
  ]
  revision = "db8def6abe6a96a20aae6ae01664cc3817a27ba0"
  source = "https://github.com/gyuho/linux-inspect"

[[projects]]
  name = "github.com/hashicorp/consul"
  packages = ["api"]
  revision = "42f60b04bba2e4391e9a95a218c873986ebeea6b"
  revision = "b55059fc3d0327c92c41431e57dfd2df3f956b03"
  source = "https://github.com/hashicorp/consul"

[[projects]]
  branch = "master"

@@ -246,17 +250,20 @@
[[projects]]
  name = "github.com/olekukonko/tablewriter"
  packages = ["."]
  revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
  revision = "96aac992fc8b1a4c83841a6c3e7178d20d989625"
  source = "https://github.com/olekukonko/tablewriter"

[[projects]]
  name = "github.com/samuel/go-zookeeper"
  packages = ["zk"]
  revision = "1d7be4effb13d2d908342d349d71a284a7542693"
  revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
  source = "https://github.com/samuel/go-zookeeper"

[[projects]]
  name = "github.com/spf13/cobra"
  packages = ["."]
  revision = "9495bc009a56819bdb0ddbc1a373e29c140bc674"
  revision = "b95ab734e27d33e0d8fbabf71ca990568d4e2020"
  source = "https://github.com/spf13/cobra"

[[projects]]
  name = "github.com/spf13/pflag"

@@ -264,12 +271,6 @@
  revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/ugorji/go"
  packages = ["codec"]
  revision = "ccfe18359b55b97855cee1d3f74e5efbda4869dc"

[[projects]]
  branch = "master"
  name = "golang.org/x/image"

@@ -295,7 +296,8 @@
    "lex/httplex",
    "trace"
  ]
  revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2"
  revision = "434ec0c7fe3742c984919a691b2018a6e9694425"
  source = "https://github.com/golang/net"

[[projects]]
  name = "golang.org/x/oauth2"

@@ -307,6 +309,13 @@
    "jwt"
  ]
  revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
  source = "https://github.com/golang/oauth2"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "d38bf781f16e180a1b2ad82697d2f81d7b7ecfac"

[[projects]]
  branch = "master"

@@ -332,7 +341,8 @@
[[projects]]
  name = "golang.org/x/time"
  packages = ["rate"]
  revision = "a4bde12657593d5e90d0533a3e4fd95e635124cb"
  revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
  source = "https://github.com/golang/time"

[[projects]]
  name = "google.golang.org/api"

@@ -348,6 +358,7 @@
    "transport/http"
  ]
  revision = "e5c227fa33ebccc5a430d863efae400431fc0e85"
  source = "https://github.com/google/google-api-go-client"

[[projects]]
  name = "google.golang.org/appengine"

@@ -380,31 +391,36 @@
  name = "google.golang.org/grpc"
  packages = [
    ".",
    "balancer",
    "codes",
    "connectivity",
    "credentials",
    "grpclb/grpc_lb_v1",
    "grpclb/grpc_lb_v1/messages",
    "grpclog",
    "health/grpc_health_v1",
    "internal",
    "keepalive",
    "metadata",
    "naming",
    "peer",
    "resolver",
    "stats",
    "status",
    "tap",
    "transport"
  ]
  revision = "b15215fb911b24a5d61d57feec4233d610530464"
  source = "https://github.com/grpc/grpc-go.git"
  revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
  source = "https://github.com/grpc/grpc-go"

[[projects]]
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
  revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
  source = "https://github.com/go-yaml/yaml"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "37b0bbd62b245e28469686b68faccde5593acb6e719031c70018e2b1cf29e36e"
  inputs-digest = "de282f54a00d78e5cc1cdf8a7554929ec7b716676d49647b2d9a83910b24df74"
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml (166 lines changed)
@ -1,95 +1,141 @@
|
|||
################################
|
||||
# Direct dependencies
|
||||
|
||||
# v3.3.0 RC
|
||||
[[constraint]]
|
||||
name = "github.com/coreos/etcd"
|
||||
revision = "47a8156851b5a59665421661edb7c813f8a7993e"
|
||||
source = "https://github.com/coreos/etcd"
|
||||
revision = "eaf6ae02e47edbea90aa7f0fe6c4d558611e00ab"
|
||||
|
||||
# v1.7.5
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
source = "https://github.com/grpc/grpc-go.git"
|
||||
revision = "b15215fb911b24a5d61d57feec4233d610530464"
|
||||
source = "https://github.com/grpc/grpc-go"
|
||||
revision = "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
source = "https://github.com/gogo/protobuf"
|
||||
revision = "160de10b2537169b5ae3e7e221d28269ef40d311"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
source = "https://github.com/golang/protobuf"
|
||||
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/samuel/go-zookeeper"
|
||||
revision = "1d7be4effb13d2d908342d349d71a284a7542693"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/consul"
|
||||
revision = "42f60b04bba2e4391e9a95a218c873986ebeea6b"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/net"
|
||||
revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2"
|
||||
source = "https://github.com/golang/net"
|
||||
revision = "434ec0c7fe3742c984919a691b2018a6e9694425"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/time"
|
||||
revision = "a4bde12657593d5e90d0533a3e4fd95e635124cb"
|
||||
source = "https://github.com/golang/time"
|
||||
revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/coreos/pkg"
|
||||
revision = "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gyuho/dataframe"
|
||||
revision = "73de2c550b1177c1640f3dacbbc1af00f913fedb"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gyuho/linux-inspect"
|
||||
revision = "db8def6abe6a96a20aae6ae01664cc3817a27ba0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gonum/plot"
|
||||
revision = "51b62dc5319d7fce41240d13e780a93e640b9a38"
|
||||
|
||||
# https://github.com/golang/oauth2
|
||||
[[constraint]]
|
||||
name = "golang.org/x/oauth2"
|
||||
revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
|
||||
|
||||
# https://github.com/google/google-api-go-client
|
||||
[[constraint]]
|
||||
name = "google.golang.org/api"
|
||||
revision = "e5c227fa33ebccc5a430d863efae400431fc0e85"
|
||||
|
||||
# https://github.com/golang/appengine
|
||||
[[constraint]]
|
||||
name = "google.golang.org/appengine"
|
||||
revision = "5bee14b453b4c71be47ec1781b0fa61c2ea182db"
|
||||
|
||||
# https://github.com/GoogleCloudPlatform/google-cloud-go
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
revision = "2a6493c7a9214bf56c1003bd97443d505cc7e952"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/dustin/go-humanize"
|
||||
revision = "ef638b6c2e62b857442c6443dace9366a48c0ee2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/cheggaaa/pb"
|
||||
revision = "d7e6ca3010b6f084d8056847f55d7f572f180678"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
revision = "53feefa2559fb8dfa8d81baad31be332c97d6c77"
|
||||
source = "https://github.com/go-yaml/yaml"
|
||||
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/dustin/go-humanize"
|
||||
source = "https://github.com/dustin/go-humanize"
|
||||
revision = "bb3d318650d48840a39aa21a027c6630e198e626"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/cobra"
|
||||
revision = "9495bc009a56819bdb0ddbc1a373e29c140bc674"
|
||||
source = "https://github.com/spf13/cobra"
|
||||
revision = "b95ab734e27d33e0d8fbabf71ca990568d4e2020"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/olekukonko/tablewriter"
|
||||
revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
|
||||
################################
|
||||
source = "https://github.com/olekukonko/tablewriter"
|
||||
revision = "96aac992fc8b1a4c83841a6c3e7178d20d989625"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/cheggaaa/pb"
|
||||
source = "https://github.com/cheggaaa/pb"
|
||||
revision = "18d384da9bdc1e5a08fc2a62a494c321d9ae74ea"
|
||||
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/samuel/go-zookeeper"
|
||||
source = "https://github.com/samuel/go-zookeeper"
|
||||
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/consul"
|
||||
source = "https://github.com/hashicorp/consul"
|
||||
revision = "b55059fc3d0327c92c41431e57dfd2df3f956b03"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gyuho/dataframe"
|
||||
source = "https://github.com/gyuho/dataframe"
|
||||
revision = "008fc241adc41d4bd5c54b9f6564ef16407c030e"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gyuho/linux-inspect"
|
||||
source = "https://github.com/gyuho/linux-inspect"
|
||||
revision = "db8def6abe6a96a20aae6ae01664cc3817a27ba0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gonum/plot"
|
||||
source = "https://github.com/gonum/plot"
|
||||
revision = "51b62dc5319d7fce41240d13e780a93e640b9a38"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/coreos/pkg"
|
||||
source = "https://github.com/coreos/pkg"
|
||||
revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/oauth2"
|
||||
source = "https://github.com/golang/oauth2"
|
||||
revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/api"
|
||||
source = "https://github.com/google/google-api-go-client"
|
||||
revision = "e5c227fa33ebccc5a430d863efae400431fc0e85"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/appengine"
|
||||
source = "https://github.com/golang/appengine"
|
||||
revision = "5bee14b453b4c71be47ec1781b0fa61c2ea182db"
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
source = "https://github.com/GoogleCloudPlatform/google-cloud-go"
|
||||
revision = "2a6493c7a9214bf56c1003bd97443d505cc7e952"
|
||||
|
||||
|
||||
################################
|
||||
# Transitive dependencies, but to override
|
||||
|
||||
|
||||
################################
|
||||
# Transitive dependencies, and overrides
|
||||
|
||||
# v1.3.1-coreos.6
|
||||
[[override]]
|
||||
name = "github.com/coreos/bbolt"
|
||||
source = "https://github.com/coreos/bbolt"
|
||||
revision = "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d"
|
||||
|
||||
# v1.3.0
|
||||
[[override]]
|
||||
name = "github.com/grpc-ecosystem/grpc-gateway"
|
||||
source = "https://github.com/grpc-ecosystem/grpc-gateway"
|
||||
revision = "8cc3a55af3bcf171a1c23a90c4df9cf591706104"
|
||||
|
||||
|
||||
################################
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,6 @@ package pb

import (
	"fmt"
	"strings"
	"time"
)

@@ -11,12 +10,26 @@ type Units int

const (
	// U_NO are default units, they represent a simple value and are not formatted at all.
	U_NO Units = iota
	// U_BYTES units are formatted in a human readable way (b, Bb, Mb, ...)
	// U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...)
	U_BYTES
	// U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...)
	U_BYTES_DEC
	// U_DURATION units are formatted in a human readable way (3h14m15s)
	U_DURATION
)

const (
	KiB = 1024
	MiB = 1048576
	GiB = 1073741824
	TiB = 1099511627776

	KB = 1e3
	MB = 1e6
	GB = 1e9
	TB = 1e12
)

func Format(i int64) *formatter {
	return &formatter{n: i}
}

@@ -28,11 +41,6 @@ type formatter struct {
	perSec bool
}

func (f *formatter) Value(n int64) *formatter {
	f.n = n
	return f
}

func (f *formatter) To(unit Units) *formatter {
	f.unit = unit
	return f

@@ -52,13 +60,10 @@ func (f *formatter) String() (out string) {
	switch f.unit {
	case U_BYTES:
		out = formatBytes(f.n)
	case U_BYTES_DEC:
		out = formatBytesDec(f.n)
	case U_DURATION:
		d := time.Duration(f.n)
		if d > time.Hour*24 {
			out = fmt.Sprintf("%dd", d/24/time.Hour)
			d -= (d / time.Hour / 24) * (time.Hour * 24)
		}
		out = fmt.Sprintf("%s%v", out, d)
		out = formatDuration(f.n)
	default:
		out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
	}

@@ -68,20 +73,46 @@ func (f *formatter) String() (out string) {
	return
}

// Convert bytes to human readable string. Like a 2 MB, 64.2 KB, 52 B
// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B
func formatBytes(i int64) (result string) {
	switch {
	case i > (1024 * 1024 * 1024 * 1024):
		result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
	case i > (1024 * 1024 * 1024):
		result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
	case i > (1024 * 1024):
		result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
	case i > 1024:
		result = fmt.Sprintf("%.02f KB", float64(i)/1024)
	case i >= TiB:
		result = fmt.Sprintf("%.02f TiB", float64(i)/TiB)
	case i >= GiB:
		result = fmt.Sprintf("%.02f GiB", float64(i)/GiB)
	case i >= MiB:
		result = fmt.Sprintf("%.02f MiB", float64(i)/MiB)
	case i >= KiB:
		result = fmt.Sprintf("%.02f KiB", float64(i)/KiB)
	default:
		result = fmt.Sprintf("%d B", i)
	}
	result = strings.Trim(result, " ")
	return
}

// Convert bytes to base-10 human readable string. Like 2 MB, 64.2 KB, 52 B
func formatBytesDec(i int64) (result string) {
	switch {
	case i >= TB:
		result = fmt.Sprintf("%.02f TB", float64(i)/TB)
	case i >= GB:
		result = fmt.Sprintf("%.02f GB", float64(i)/GB)
	case i >= MB:
		result = fmt.Sprintf("%.02f MB", float64(i)/MB)
	case i >= KB:
		result = fmt.Sprintf("%.02f KB", float64(i)/KB)
	default:
		result = fmt.Sprintf("%d B", i)
	}
	return
}

func formatDuration(n int64) (result string) {
	d := time.Duration(n)
	if d > time.Hour*24 {
		result = fmt.Sprintf("%dd", d/24/time.Hour)
		d -= (d / time.Hour / 24) * (time.Hour * 24)
	}
	result = fmt.Sprintf("%s%v", result, d)
	return
}
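Note: the upgraded pb now distinguishes binary units (KiB, MiB, base 1024) from decimal units (KB, MB, base 1000) via U_BYTES vs U_BYTES_DEC. A minimal standalone sketch of the same idea, independent of pb (the constant values mirror the hunk above):

package main

import "fmt"

const (
	KiB = 1 << 10 // 1024, binary
	KB  = 1e3     // 1000, decimal
)

func main() {
	n := int64(1536)
	// base-2: 1536/1024 = 1.50 KiB; base-10: 1536/1000 = 1.54 KB
	fmt.Printf("%.02f KiB\n", float64(n)/KiB)
	fmt.Printf("%.02f KB\n", float64(n)/KB)
}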
@@ -13,7 +13,7 @@ import (
)

// Current version
const Version = "1.0.6"
const Version = "1.0.19"

const (
	// Default refresh rate - 200ms

@@ -47,8 +47,6 @@ func New64(total int64) *ProgressBar {
		Units:        U_NO,
		ManualUpdate: false,
		finish:       make(chan struct{}),
		currentValue: -1,
		mu:           new(sync.Mutex),
	}
	return pb.Format(FORMAT)
}

@@ -67,7 +65,8 @@ func StartNew(total int) *ProgressBar {
type Callback func(out string)

type ProgressBar struct {
	current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
	current  int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
	previous int64

	Total       int64
	RefreshRate time.Duration

@@ -91,13 +90,14 @@ type ProgressBar struct {
	finish   chan struct{}
	isFinish bool

	startTime    time.Time
	startValue   int64
	currentValue int64
	startTime  time.Time
	startValue int64

	changeTime time.Time

	prefix, postfix string

	mu        *sync.Mutex
	mu        sync.Mutex
	lastPrint string

	BarStart string

@@ -112,7 +112,7 @@ type ProgressBar struct {
// Start print
func (pb *ProgressBar) Start() *ProgressBar {
	pb.startTime = time.Now()
	pb.startValue = pb.current
	pb.startValue = atomic.LoadInt64(&pb.current)
	if pb.Total == 0 {
		pb.ShowTimeLeft = false
		pb.ShowPercent = false

@@ -173,7 +173,7 @@ func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
func (pb *ProgressBar) Format(format string) *ProgressBar {
	var formatEntries []string
	if len(format) == 5 {
	if utf8.RuneCountInString(format) == 5 {
		formatEntries = strings.Split(format, "")
	} else {
		formatEntries = strings.Split(format, "\x00")

@@ -222,6 +222,8 @@ func (pb *ProgressBar) Finish() {
	pb.finishOnce.Do(func() {
		close(pb.finish)
		pb.write(atomic.LoadInt64(&pb.current))
		pb.mu.Lock()
		defer pb.mu.Unlock()
		switch {
		case pb.Output != nil:
			fmt.Fprintln(pb.Output)

@@ -232,6 +234,13 @@ func (pb *ProgressBar) Finish() {
	})
}

// IsFinished return boolean
func (pb *ProgressBar) IsFinished() bool {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	return pb.isFinish
}

// End print and write string 'str'
func (pb *ProgressBar) FinishPrint(str string) {
	pb.Finish()

@@ -290,8 +299,12 @@ func (pb *ProgressBar) write(current int64) {
	}

	// time left
	fromStart := time.Now().Sub(pb.startTime)
	pb.mu.Lock()
	currentFromStart := current - pb.startValue
	fromStart := time.Now().Sub(pb.startTime)
	lastChangeTime := pb.changeTime
	fromChange := lastChangeTime.Sub(pb.startTime)
	pb.mu.Unlock()
	select {
	case <-pb.finish:
		if pb.ShowFinalTime {

@@ -301,17 +314,20 @@ func (pb *ProgressBar) write(current int64) {
		}
	default:
		if pb.ShowTimeLeft && currentFromStart > 0 {
			perEntry := fromStart / time.Duration(currentFromStart)
			perEntry := fromChange / time.Duration(currentFromStart)
			var left time.Duration
			if pb.Total > 0 {
				left = time.Duration(pb.Total-currentFromStart) * perEntry
				left -= time.Since(lastChangeTime)
				left = (left / time.Second) * time.Second
			} else {
				left = time.Duration(currentFromStart) * perEntry
				left = (left / time.Second) * time.Second
			}
			timeLeft := Format(int64(left)).To(U_DURATION).String()
			timeLeftBox = fmt.Sprintf(" %s", timeLeft)
			if left > 0 {
				timeLeft := Format(int64(left)).To(U_DURATION).String()
				timeLeftBox = fmt.Sprintf(" %s", timeLeft)
			}
		}
	}

@@ -332,24 +348,32 @@ func (pb *ProgressBar) write(current int64) {
	size := width - barWidth
	if size > 0 {
		if pb.Total > 0 {
			curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
			emptCount := size - curCount
			curSize := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
			emptySize := size - curSize
			barBox = pb.BarStart
			if emptCount < 0 {
				emptCount = 0
			if emptySize < 0 {
				emptySize = 0
			}
			if curCount > size {
				curCount = size
			if curSize > size {
				curSize = size
			}
			if emptCount <= 0 {
				barBox += strings.Repeat(pb.Current, curCount)
			} else if curCount > 0 {
				barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN

			cursorLen := escapeAwareRuneCountInString(pb.Current)
			if emptySize <= 0 {
				barBox += strings.Repeat(pb.Current, curSize/cursorLen)
			} else if curSize > 0 {
				cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN)
				cursorRepetitions := (curSize - cursorEndLen) / cursorLen
				barBox += strings.Repeat(pb.Current, cursorRepetitions)
				barBox += pb.CurrentN
			}
			barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd

			emptyLen := escapeAwareRuneCountInString(pb.Empty)
			barBox += strings.Repeat(pb.Empty, emptySize/emptyLen)
			barBox += pb.BarEnd
		} else {
			barBox = pb.BarStart
			pos := size - int(current)%int(size)
			barBox = pb.BarStart
			if pos-1 > 0 {
				barBox += strings.Repeat(pb.Empty, pos-1)
			}

@@ -364,16 +388,17 @@ func (pb *ProgressBar) write(current int64) {

	// check len
	out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
	if escapeAwareRuneCountInString(out) < width {
		end = strings.Repeat(" ", width-utf8.RuneCountInString(out))
	if cl := escapeAwareRuneCountInString(out); cl < width {
		end = strings.Repeat(" ", width-cl)
	}

	// and print!
	pb.mu.Lock()
	pb.lastPrint = out + end
	isFinish := pb.isFinish
	pb.mu.Unlock()
	switch {
	case pb.isFinish:
	case isFinish:
		return
	case pb.Output != nil:
		fmt.Fprint(pb.Output, "\r"+out+end)

@@ -406,10 +431,14 @@ func (pb *ProgressBar) GetWidth() int {
// Write the current state of the progressbar
func (pb *ProgressBar) Update() {
	c := atomic.LoadInt64(&pb.current)
	if pb.AlwaysUpdate || c != pb.currentValue {
		pb.write(c)
		pb.currentValue = c
	p := atomic.LoadInt64(&pb.previous)
	if p != c {
		pb.mu.Lock()
		pb.changeTime = time.Now()
		pb.mu.Unlock()
		atomic.StoreInt64(&pb.previous, c)
	}
	pb.write(c)
	if pb.AutoStat {
		if c == 0 {
			pb.startTime = time.Now()

@@ -420,7 +449,10 @@ func (pb *ProgressBar) Update() {
	}
}

// String return the last bar print
func (pb *ProgressBar) String() string {
	pb.mu.Lock()
	defer pb.mu.Unlock()
	return pb.lastPrint
}

@@ -435,10 +467,3 @@ func (pb *ProgressBar) refresher() {
		}
	}
}

type window struct {
	Row    uint16
	Col    uint16
	Xpixel uint16
	Ypixel uint16
}
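Note: the new Update tracks the hot counter with atomics while guarding the rarely-written changeTime with a mutex. A minimal sketch of that pattern (the Counter type here is hypothetical, not part of pb):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// Counter mirrors the pattern in pb.Update: the hot values are read and
// written atomically, while the timestamp is guarded by a mutex.
type Counter struct {
	current  int64 // accessed only via atomic ops
	previous int64 // accessed only via atomic ops

	mu         sync.Mutex
	changeTime time.Time
}

func (c *Counter) Add(n int64) { atomic.AddInt64(&c.current, n) }

func (c *Counter) Update() {
	cur := atomic.LoadInt64(&c.current)
	if atomic.LoadInt64(&c.previous) != cur {
		c.mu.Lock()
		c.changeTime = time.Now() // remember when the value last moved
		c.mu.Unlock()
		atomic.StoreInt64(&c.previous, cur)
	}
}

func main() {
	var c Counter
	c.Add(10)
	c.Update()
	fmt.Println(atomic.LoadInt64(&c.current))
}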
@@ -1,8 +0,0 @@
// +build linux darwin freebsd netbsd openbsd dragonfly
// +build !appengine

package pb

import "syscall"

const sysIoctl = syscall.SYS_IOCTL

@@ -1,6 +0,0 @@
// +build solaris
// +build !appengine

package pb

const sysIoctl = 54
@@ -8,25 +8,24 @@ import (
	"fmt"
	"os"
	"os/signal"
	"runtime"
	"sync"
	"syscall"
	"unsafe"
)

const (
	TIOCGWINSZ     = 0x5413
	TIOCGWINSZ_OSX = 1074295912
	"golang.org/x/sys/unix"
)

var tty *os.File

var ErrPoolWasStarted = errors.New("Bar pool was started")

var echoLocked bool
var echoLockMutex sync.Mutex
var (
	echoLockMutex    sync.Mutex
	origTermStatePtr *unix.Termios
	tty              *os.File
)

func init() {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()

	var err error
	tty, err = os.Open("/dev/tty")
	if err != nil {

@@ -36,64 +35,63 @@ func init() {

// terminalWidth returns width of the terminal.
func terminalWidth() (int, error) {
	w := new(window)
	tio := syscall.TIOCGWINSZ
	if runtime.GOOS == "darwin" {
		tio = TIOCGWINSZ_OSX
	}
	res, _, err := syscall.Syscall(sysIoctl,
		tty.Fd(),
		uintptr(tio),
		uintptr(unsafe.Pointer(w)),
	)
	if int(res) == -1 {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()

	fd := int(tty.Fd())

	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
	if err != nil {
		return 0, err
	}
	return int(w.Col), nil
}

var oldState syscall.Termios
	return int(ws.Col), nil
}

func lockEcho() (quit chan int, err error) {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()
	if echoLocked {
		err = ErrPoolWasStarted
		return
	}
	echoLocked = true

	fd := tty.Fd()
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't get terminal settings: %v", e)
		return
	if origTermStatePtr != nil {
		return quit, ErrPoolWasStarted
	}

	newState := oldState
	newState.Lflag &^= syscall.ECHO
	newState.Lflag |= syscall.ICANON | syscall.ISIG
	newState.Iflag |= syscall.ICRNL
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings: %v", e)
		return
	fd := int(tty.Fd())

	oldTermStatePtr, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
	if err != nil {
		return nil, fmt.Errorf("Can't get terminal settings: %v", err)
	}

	oldTermios := *oldTermStatePtr
	newTermios := oldTermios
	newTermios.Lflag &^= syscall.ECHO
	newTermios.Lflag |= syscall.ICANON | syscall.ISIG
	newTermios.Iflag |= syscall.ICRNL
	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil {
		return nil, fmt.Errorf("Can't set terminal settings: %v", err)
	}

	quit = make(chan int, 1)
	go catchTerminate(quit)
	return
}

func unlockEcho() (err error) {
func unlockEcho() error {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()
	if !echoLocked {
		return
	if origTermStatePtr == nil {
		return nil
	}
	echoLocked = false
	fd := tty.Fd()
	if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
		err = fmt.Errorf("Can't set terminal settings")

	fd := int(tty.Fd())

	if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil {
		return fmt.Errorf("Can't set terminal settings: %v", err)
	}
	return

	origTermStatePtr = nil

	return nil
}

// listen exit signals and restore terminal state
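Note: the rewrite replaces raw syscall.Syscall ioctls and the hand-rolled window struct with the typed wrappers from golang.org/x/sys/unix. A minimal standalone sketch of the new terminal-width query (assumes a Unix-like system with a /dev/tty):

// +build linux darwin

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	tty, err := os.Open("/dev/tty")
	if err != nil {
		panic(err)
	}
	defer tty.Close()

	// IoctlGetWinsize wraps the TIOCGWINSZ ioctl and returns a typed struct,
	// so no unsafe.Pointer or per-OS ioctl constants are needed.
	ws, err := unix.IoctlGetWinsize(int(tty.Fd()), unix.TIOCGWINSZ)
	if err != nil {
		panic(err)
	}
	fmt.Println("columns:", ws.Col)
}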
@@ -3,6 +3,7 @@
package pb

import (
	"io"
	"sync"
	"time"
)

@@ -19,14 +20,19 @@ func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
}

type Pool struct {
	RefreshRate time.Duration
	bars        []*ProgressBar
	quit        chan int
	finishOnce  sync.Once
	Output        io.Writer
	RefreshRate   time.Duration
	bars          []*ProgressBar
	lastBarsCount int
	quit          chan int
	m             sync.Mutex
	finishOnce    sync.Once
}

// Add progress bars.
func (p *Pool) Add(pbs ...*ProgressBar) {
	p.m.Lock()
	defer p.m.Unlock()
	for _, bar := range pbs {
		bar.ManualUpdate = true
		bar.NotPrint = true
@@ -8,13 +8,18 @@ import (
)

func (p *Pool) print(first bool) bool {
	p.m.Lock()
	defer p.m.Unlock()
	var out string
	if !first {
		coords, err := getCursorPos()
		if err != nil {
			log.Panic(err)
		}
		coords.Y -= int16(len(p.bars))
		coords.Y -= int16(p.lastBarsCount)
		if coords.Y < 0 {
			coords.Y = 0
		}
		coords.X = 0

		err = setCursorPos(coords)

@@ -24,12 +29,17 @@ func (p *Pool) print(first bool) bool {
	}
	isFinished := true
	for _, bar := range p.bars {
		if !bar.isFinish {
		if !bar.IsFinished() {
			isFinished = false
		}
		bar.Update()
		out += fmt.Sprintf("\r%s\n", bar.String())
	}
	fmt.Print(out)
	if p.Output != nil {
		fmt.Fprint(p.Output, out)
	} else {
		fmt.Print(out)
	}
	p.lastBarsCount = len(p.bars)
	return isFinished
}
@@ -5,18 +5,25 @@ package pb

import "fmt"

func (p *Pool) print(first bool) bool {
	p.m.Lock()
	defer p.m.Unlock()
	var out string
	if !first {
		out = fmt.Sprintf("\033[%dA", len(p.bars))
		out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
	}
	isFinished := true
	for _, bar := range p.bars {
		if !bar.isFinish {
		if !bar.IsFinished() {
			isFinished = false
		}
		bar.Update()
		out += fmt.Sprintf("\r%s\n", bar.String())
	}
	fmt.Print(out)
	if p.Output != nil {
		fmt.Fprint(p.Output, out)
	} else {
		fmt.Print(out)
	}
	p.lastBarsCount = len(p.bars)
	return isFinished
}
@@ -11,7 +11,7 @@ var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
func escapeAwareRuneCountInString(s string) int {
	n := runewidth.StringWidth(s)
	for _, sm := range ctrlFinder.FindAllString(s, -1) {
		n -= len(sm)
		n -= runewidth.StringWidth(sm)
	}
	return n
}
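Note: the fix subtracts the display width of each ANSI color sequence rather than its byte length, so multi-byte escapes no longer skew the count. A minimal sketch of the same counting approach, assuming the github.com/mattn/go-runewidth package (as the hunk does):

package main

import (
	"fmt"
	"regexp"

	"github.com/mattn/go-runewidth"
)

// Matches simple SGR color sequences such as "\x1b[31m".
var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")

// visibleWidth returns the on-screen width of s, discounting color codes.
func visibleWidth(s string) int {
	n := runewidth.StringWidth(s)
	for _, sm := range ctrlFinder.FindAllString(s, -1) {
		n -= runewidth.StringWidth(sm) // subtract the escape's own width
	}
	return n
}

func main() {
	colored := "\x1b[31mred\x1b[0m"
	fmt.Println(visibleWidth(colored)) // 3, not len(colored)
}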
@@ -1,7 +0,0 @@
// +build linux solaris
// +build !appengine

package pb

const ioctlReadTermios = 0x5401  // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS

@@ -0,0 +1,13 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux solaris
// +build !appengine

package pb

import "golang.org/x/sys/unix"

const ioctlReadTermios = unix.TCGETS
const ioctlWriteTermios = unix.TCSETS
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto
// DO NOT EDIT!

/*
Package authpb is a generated protocol buffer package.

@@ -22,6 +21,8 @@ import (
	math "math"

	_ "github.com/gogo/protobuf/gogoproto"

	io "io"
)

@@ -217,24 +218,6 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -15,11 +15,11 @@
package auth

import (
	"context"
	"crypto/rsa"
	"io/ioutil"

	jwt "github.com/dgrijalva/jwt-go"
	"golang.org/x/net/context"
)

type tokenJWT struct {
@@ -18,6 +18,7 @@ package auth
// JWT based mechanism will be added in the near future.

import (
	"context"
	"crypto/rand"
	"fmt"
	"math/big"

@@ -25,8 +26,6 @@ import (
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"
)

const (

@@ -119,6 +118,9 @@ func (t *tokenSimple) genTokenPrefix() (string, error) {
func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
	t.simpleTokensMu.Lock()
	defer t.simpleTokensMu.Unlock()
	if t.simpleTokenKeeper == nil {
		return
	}

	_, ok := t.simpleTokens[token]
	if ok {

@@ -186,9 +188,9 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (

func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
	// rev isn't used in simple token, it is only used in JWT
	index := ctx.Value("index").(uint64)
	simpleToken := ctx.Value("simpleToken").(string)
	token := fmt.Sprintf("%s.%d", simpleToken, index)
	index := ctx.Value(AuthenticateParamIndex{}).(uint64)
	simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
	token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
	t.assignSimpleTokenToUser(username, token)

	return token, nil
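Note: etcd here moves from string context keys ("index", "simpleToken") to empty struct types (AuthenticateParamIndex{}), which cannot collide with keys defined by other packages. A minimal sketch of the pattern, with hypothetical key names:

package main

import (
	"context"
	"fmt"
)

// Empty struct types make collision-proof context keys: each package can
// define its own key type and never clobber another's values, which a
// plain string key like "index" cannot guarantee.
type paramIndex struct{}
type paramTokenPrefix struct{}

func main() {
	ctx := context.WithValue(context.Background(), paramIndex{}, uint64(7))
	ctx = context.WithValue(ctx, paramTokenPrefix{}, "abcd")

	index := ctx.Value(paramIndex{}).(uint64)
	prefix := ctx.Value(paramTokenPrefix{}).(string)
	fmt.Printf("%s.%d\n", prefix, index) // abcd.7
}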
@@ -16,6 +16,7 @@ package auth

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"sort"

@@ -26,9 +27,9 @@ import (
	"github.com/coreos/etcd/auth/authpb"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/backend"

	"github.com/coreos/pkg/capnslog"
	"golang.org/x/crypto/bcrypt"
	"golang.org/x/net/context"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"

@@ -80,6 +81,12 @@ type AuthInfo struct {
	Revision uint64
}

// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate()
type AuthenticateParamIndex struct{}

// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate()
type AuthenticateParamSimpleTokenPrefix struct{}

type AuthStore interface {
	// AuthEnable turns on the authentication feature
	AuthEnable() error

@@ -165,6 +172,9 @@ type AuthStore interface {

	// WithRoot generates and installs a token that can be used as a root credential
	WithRoot(ctx context.Context) context.Context

	// HasRole checks that user has role
	HasRole(user, role string) bool
}

type TokenProvider interface {

@@ -448,7 +458,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
	}

	user.Roles = append(user.Roles, r.Role)
	sort.Sort(sort.StringSlice(user.Roles))
	sort.Strings(user.Roles)

	putUser(tx, user)

@@ -463,14 +473,14 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
	tx := as.be.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	var resp pb.AuthUserGetResponse

	user := getUser(tx, r.Name)
	tx.Unlock()

	if user == nil {
		return nil, ErrUserNotFound
	}

	var resp pb.AuthUserGetResponse
	resp.Roles = append(resp.Roles, user.Roles...)
	return &resp, nil
}

@@ -478,17 +488,14 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
	tx := as.be.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	var resp pb.AuthUserListResponse

	users := getAllUsers(tx)
	tx.Unlock()

	for _, u := range users {
		resp.Users = append(resp.Users, string(u.Name))
	resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
	for i := range users {
		resp.Users[i] = string(users[i].Name)
	}

	return &resp, nil
	return resp, nil
}

func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {

@@ -549,17 +556,14 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
	tx := as.be.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	var resp pb.AuthRoleListResponse

	roles := getAllRoles(tx)
	tx.Unlock()

	for _, r := range roles {
		resp.Roles = append(resp.Roles, string(r.Name))
	resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
	for i := range roles {
		resp.Roles[i] = string(roles[i].Name)
	}

	return &resp, nil
	return resp, nil
}

func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {

@@ -785,9 +789,9 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {

	tx := as.be.BatchTx()
	tx.Lock()
	defer tx.Unlock()

	u := getUser(tx, authInfo.Username)
	tx.Unlock()

	if u == nil {
		return ErrUserNotFound
	}

@@ -819,18 +823,15 @@ func getAllUsers(tx backend.BatchTx) []*authpb.User {
		return nil
	}

	var users []*authpb.User

	for _, v := range vs {
	users := make([]*authpb.User, len(vs))
	for i := range vs {
		user := &authpb.User{}
		err := user.Unmarshal(v)
		err := user.Unmarshal(vs[i])
		if err != nil {
			plog.Panicf("failed to unmarshal user struct: %s", err)
		}

		users = append(users, user)
		users[i] = user
	}

	return users
}

@@ -866,18 +867,15 @@ func getAllRoles(tx backend.BatchTx) []*authpb.Role {
		return nil
	}

	var roles []*authpb.Role

	for _, v := range vs {
	roles := make([]*authpb.Role, len(vs))
	for i := range vs {
		role := &authpb.Role{}
		err := role.Unmarshal(v)
		err := role.Unmarshal(vs[i])
		if err != nil {
			plog.Panicf("failed to unmarshal role struct: %s", err)
		}

		roles = append(roles, role)
		roles[i] = role
	}

	return roles
}

@@ -939,12 +937,9 @@ func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
}

func hasRootRole(u *authpb.User) bool {
	for _, r := range u.Roles {
		if r == rootRole {
			return true
		}
	}
	return false
	// u.Roles is sorted in UserGrantRole(), so we can use binary search.
	idx := sort.SearchStrings(u.Roles, rootRole)
	return idx != len(u.Roles) && u.Roles[idx] == rootRole
}

func (as *authStore) commitRevision(tx backend.BatchTx) {

@@ -1073,13 +1068,13 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context {

	var ctxForAssign context.Context
	if ts := as.tokenProvider.(*tokenSimple); ts != nil {
		ctx1 := context.WithValue(ctx, "index", uint64(0))
		ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
		prefix, err := ts.genTokenPrefix()
		if err != nil {
			plog.Errorf("failed to generate prefix of internally used token")
			return ctx
		}
		ctxForAssign = context.WithValue(ctx1, "simpleToken", prefix)
		ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
	} else {
		ctxForAssign = ctx
	}

@@ -1095,5 +1090,27 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context {
		"token": token,
	}
	tokenMD := metadata.New(mdMap)
	return metadata.NewContext(ctx, tokenMD)

	// use "mdIncomingKey{}" since it's called from local etcdserver
	return metadata.NewIncomingContext(ctx, tokenMD)
}

func (as *authStore) HasRole(user, role string) bool {
	tx := as.be.BatchTx()
	tx.Lock()
	u := getUser(tx, user)
	tx.Unlock()

	if u == nil {
		plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user)
		return false
	}

	for _, r := range u.Roles {
		if role == r {
			return true
		}
	}

	return false
}
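Note: hasRootRole now relies on u.Roles being kept sorted by UserGrantRole (via sort.Strings), so the linear scan becomes sort.SearchStrings. A minimal sketch of that membership test:

package main

import (
	"fmt"
	"sort"
)

// contains reports whether sorted contains want, using binary search.
// The slice must already be sorted, as user.Roles is after sort.Strings.
func contains(sorted []string, want string) bool {
	idx := sort.SearchStrings(sorted, want)
	return idx != len(sorted) && sorted[idx] == want
}

func main() {
	roles := []string{"admin", "guest", "root"} // kept sorted on insert
	fmt.Println(contains(roles, "root"))  // true
	fmt.Println(contains(roles, "write")) // false
}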
@@ -16,11 +16,10 @@ package client

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
	"net/url"

	"golang.org/x/net/context"
)

type Role struct {

@@ -16,12 +16,11 @@ package client

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
	"net/url"
	"path"

	"golang.org/x/net/context"
)

var (
@@ -15,6 +15,7 @@
package client

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"

@@ -29,8 +30,6 @@ import (
	"time"

	"github.com/coreos/etcd/version"

	"golang.org/x/net/context"
)

var (

@@ -372,12 +371,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
			if err == context.Canceled || err == context.DeadlineExceeded {
				return nil, nil, err
			}
			if isOneShot {
				return nil, nil, err
			}
			continue
		}
		if resp.StatusCode/100 == 5 {
		} else if resp.StatusCode/100 == 5 {
			switch resp.StatusCode {
			case http.StatusInternalServerError, http.StatusServiceUnavailable:
				// TODO: make sure this is a no leader response

@@ -385,10 +379,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
			default:
				cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
			}
			if isOneShot {
				return nil, nil, cerr.Errors[0]
			err = cerr.Errors[0]
		}
		if err != nil {
			if !isOneShot {
				continue
			}
			continue
			c.Lock()
			c.pinned = (k + 1) % leps
			c.Unlock()
			return nil, nil, err
		}
		if k != pinned {
			c.Lock()

@@ -670,8 +670,15 @@ func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
}

func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
	p := r.Perm(len(eps))
	neps := make([]url.URL, len(eps))
	// copied from Go 1.9<= rand.Rand.Perm
	n := len(eps)
	p := make([]int, n)
	for i := 0; i < n; i++ {
		j := r.Intn(i + 1)
		p[i] = p[j]
		p[j] = i
	}
	neps := make([]url.URL, n)
	for i, k := range p {
		neps[i] = eps[k]
	}
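Note: shuffleEndpoints inlines the permutation construction that rand.Rand.Perm used through Go 1.9, pinning the shuffle's output for a given seed regardless of future standard-library changes. A minimal sketch of that inlined permutation:

package main

import (
	"fmt"
	"math/rand"
)

// perm reproduces the inside-out Fisher-Yates construction used by
// rand.Perm through Go 1.9, so results for a fixed seed stay stable
// even if the standard library's implementation later changes.
func perm(r *rand.Rand, n int) []int {
	p := make([]int, n)
	for i := 0; i < n; i++ {
		j := r.Intn(i + 1)
		p[i] = p[j]
		p[j] = i
	}
	return p
}

func main() {
	r := rand.New(rand.NewSource(42))
	fmt.Println(perm(r, 5)) // deterministic for seed 42
}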
@@ -19,9 +19,9 @@ Create a Config and exchange it for a Client:

	import (
		"net/http"
		"context"

		"github.com/coreos/etcd/client"
		"golang.org/x/net/context"
	)

	cfg := client.Config{

(File diff suppressed because it is too large.)
@@ -17,6 +17,7 @@ package client
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"

@@ -28,7 +29,6 @@ import (

	"github.com/coreos/etcd/pkg/pathutil"
	"github.com/ugorji/go/codec"
	"golang.org/x/net/context"
)

const (

@@ -653,8 +653,7 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp
	default:
		err = unmarshalFailedKeysResponse(body)
	}

	return
	return res, err
}

func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
@@ -16,14 +16,13 @@ package client

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"path"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/pkg/types"
)
@ -15,12 +15,13 @@
|
|||
package clientv3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
|
|
@ -100,60 +101,65 @@ type Auth interface {
|
|||
}
|
||||
|
||||
type auth struct {
|
||||
remote pb.AuthClient
|
||||
remote pb.AuthClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func NewAuth(c *Client) Auth {
|
||||
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
|
||||
api := &auth{remote: RetryAuthClient(c)}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
|
||||
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
|
||||
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
|
||||
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
|
||||
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
|
||||
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
|
||||
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
|
||||
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
|
||||
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
|
||||
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
|
||||
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
|
||||
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
|
||||
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
|
||||
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
|
||||
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
|
||||
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
|
||||
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
|
||||
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
|
||||
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
|
|
@ -163,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
		RangeEnd: []byte(rangeEnd),
		PermType: authpb.Permission_Type(permType),
	}
	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
	return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
	return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
	return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}

@ -196,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) {
}

type authenticator struct {
	conn   *grpc.ClientConn // conn in-use
	remote pb.AuthClient
	conn     *grpc.ClientConn // conn in-use
	remote   pb.AuthClient
	callOpts []grpc.CallOption
}

func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
	return (*AuthenticateResponse)(resp), toErr(ctx, err)
}

@ -209,14 +216,18 @@ func (auth *authenticator) close() {
	auth.conn.Close()
}

func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return nil, err
	}

	return &authenticator{
	api := &authenticator{
		conn:   conn,
		remote: pb.NewAuthClient(conn),
	}, nil
	}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api, nil
}

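The hunks above replace hard-coded grpc.FailFast(false) arguments with a client-wide callOpts slice that every RPC passes through. A minimal, self-contained sketch of that pattern in isolation (the withDefaults helper is hypothetical, not part of clientv3; grpc.FailFast and grpc.MaxCallRecvMsgSize are real gRPC call options of this vintage):

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// withDefaults prepends client-wide call options so each RPC call site
// no longer hard-codes grpc.FailFast(false) by hand.
func withDefaults(defaults []grpc.CallOption, perCall ...grpc.CallOption) []grpc.CallOption {
	out := make([]grpc.CallOption, 0, len(defaults)+len(perCall))
	out = append(out, defaults...)
	return append(out, perCall...)
}

func main() {
	defaults := []grpc.CallOption{grpc.FailFast(true)}
	opts := withDefaults(defaults, grpc.MaxCallRecvMsgSize(4*1024*1024))
	fmt.Println(len(opts)) // 2
}
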
@ -1,356 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"net/url"
	"strings"
	"sync"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")

// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
	// addrs are the client's endpoints for grpc
	addrs []grpc.Address
	// notifyCh notifies grpc of the set of addresses for connecting
	notifyCh chan []grpc.Address

	// readyc closes once the first connection is up
	readyc    chan struct{}
	readyOnce sync.Once

	// mu protects upEps, pinAddr, and connectingAddr
	mu sync.RWMutex

	// upc closes when upEps transitions from empty to non-zero or the balancer closes.
	upc chan struct{}

	// downc closes when grpc calls down() on pinAddr
	downc chan struct{}

	// stopc is closed to signal updateNotifyLoop should stop.
	stopc chan struct{}

	// donec closes when all goroutines are exited
	donec chan struct{}

	// updateAddrsC notifies updateNotifyLoop to update addrs.
	updateAddrsC chan struct{}

	// grpc issues TLS cert checks using the string passed into dial so
	// that string must be the host. To recover the full scheme://host URL,
	// have a map from hosts to the original endpoint.
	host2ep map[string]string

	// pinAddr is the currently pinned address; set to the empty string on
	// initialization and shutdown.
	pinAddr string

	closed bool
}

func newSimpleBalancer(eps []string) *simpleBalancer {
	notifyCh := make(chan []grpc.Address, 1)
	addrs := make([]grpc.Address, len(eps))
	for i := range eps {
		addrs[i].Addr = getHost(eps[i])
	}
	sb := &simpleBalancer{
		addrs:        addrs,
		notifyCh:     notifyCh,
		readyc:       make(chan struct{}),
		upc:          make(chan struct{}),
		stopc:        make(chan struct{}),
		downc:        make(chan struct{}),
		donec:        make(chan struct{}),
		updateAddrsC: make(chan struct{}, 1),
		host2ep:      getHost2ep(eps),
	}
	close(sb.downc)
	go sb.updateNotifyLoop()
	return sb
}

func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }

func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.upc
}

func (b *simpleBalancer) getEndpoint(host string) string {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.host2ep[host]
}

func getHost2ep(eps []string) map[string]string {
	hm := make(map[string]string, len(eps))
	for i := range eps {
		_, host, _ := parseEndpoint(eps[i])
		hm[host] = eps[i]
	}
	return hm
}

func (b *simpleBalancer) updateAddrs(eps []string) {
	np := getHost2ep(eps)

	b.mu.Lock()

	match := len(np) == len(b.host2ep)
	for k, v := range np {
		if b.host2ep[k] != v {
			match = false
			break
		}
	}
	if match {
		// same endpoints, so no need to update address
		b.mu.Unlock()
		return
	}

	b.host2ep = np

	addrs := make([]grpc.Address, 0, len(eps))
	for i := range eps {
		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
	}
	b.addrs = addrs

	// updating notifyCh can trigger new connections,
	// only update addrs if all connections are down
	// or addrs does not include pinAddr.
	update := !hasAddr(addrs, b.pinAddr)
	b.mu.Unlock()

	if update {
		select {
		case b.updateAddrsC <- struct{}{}:
		case <-b.stopc:
		}
	}
}

func hasAddr(addrs []grpc.Address, targetAddr string) bool {
	for _, addr := range addrs {
		if targetAddr == addr.Addr {
			return true
		}
	}
	return false
}

func (b *simpleBalancer) updateNotifyLoop() {
	defer close(b.donec)

	for {
		b.mu.RLock()
		upc, downc, addr := b.upc, b.downc, b.pinAddr
		b.mu.RUnlock()
		// downc or upc should be closed
		select {
		case <-downc:
			downc = nil
		default:
		}
		select {
		case <-upc:
			upc = nil
		default:
		}
		switch {
		case downc == nil && upc == nil:
			// stale
			select {
			case <-b.stopc:
				return
			default:
			}
		case downc == nil:
			b.notifyAddrs()
			select {
			case <-upc:
			case <-b.updateAddrsC:
				b.notifyAddrs()
			case <-b.stopc:
				return
			}
		case upc == nil:
			select {
			// close connections that are not the pinned address
			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
			case <-downc:
			case <-b.stopc:
				return
			}
			select {
			case <-downc:
			case <-b.updateAddrsC:
			case <-b.stopc:
				return
			}
			b.notifyAddrs()
		}
	}
}

func (b *simpleBalancer) notifyAddrs() {
	b.mu.RLock()
	addrs := b.addrs
	b.mu.RUnlock()
	select {
	case b.notifyCh <- addrs:
	case <-b.stopc:
	}
}

func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	// gRPC might call Up after it called Close. We add this check
	// to "fix" it up at application layer. Or our simpleBalancer
	// might panic since b.upc is closed.
	if b.closed {
		return func(err error) {}
	}
	// gRPC might call Up on a stale address.
	// Prevent updating pinAddr with a stale address.
	if !hasAddr(b.addrs, addr.Addr) {
		return func(err error) {}
	}
	if b.pinAddr != "" {
		return func(err error) {}
	}
	// notify waiting Get()s and pin first connected address
	close(b.upc)
	b.downc = make(chan struct{})
	b.pinAddr = addr.Addr
	// notify client that a connection is up
	b.readyOnce.Do(func() { close(b.readyc) })
	return func(err error) {
		b.mu.Lock()
		b.upc = make(chan struct{})
		close(b.downc)
		b.pinAddr = ""
		b.mu.Unlock()
	}
}

func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
	var (
		addr   string
		closed bool
	)

	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
	// an address it has notified via Notify immediately instead of blocking.
	if !opts.BlockingWait {
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr == "" {
			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
		}
		return grpc.Address{Addr: addr}, func() {}, nil
	}

	for {
		b.mu.RLock()
		ch := b.upc
		b.mu.RUnlock()
		select {
		case <-ch:
		case <-b.donec:
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		case <-ctx.Done():
			return grpc.Address{Addr: ""}, nil, ctx.Err()
		}
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr != "" {
			break
		}
	}
	return grpc.Address{Addr: addr}, func() {}, nil
}

func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }

func (b *simpleBalancer) Close() error {
	b.mu.Lock()
	// In case gRPC calls close twice. TODO: remove the checking
	// when we are sure that gRPC won't call close twice.
	if b.closed {
		b.mu.Unlock()
		<-b.donec
		return nil
	}
	b.closed = true
	close(b.stopc)
	b.pinAddr = ""

	// In the case of following scenario:
	// 1. upc is not closed; no pinned address
	// 2. client issues an rpc, calling invoke(), which calls Get(), enters for loop, blocks
	// 3. clientconn.Close() calls balancer.Close(); closed = true
	// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
	// we must close upc so Get() exits from blocking on upc
	select {
	case <-b.upc:
	default:
		// terminate all waiting Get()s
		close(b.upc)
	}

	b.mu.Unlock()

	// wait for updateNotifyLoop to finish
	<-b.donec
	close(b.notifyCh)

	return nil
}

func getHost(ep string) string {
	url, uerr := url.Parse(ep)
	if uerr != nil || !strings.Contains(ep, "://") {
		return ep
	}
	return url.Host
}

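For reference, the getHost helper the deleted balancer relied on (and which survives in health_balancer.go below) behaves like this; a runnable check against the standard library only:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// getHost mirrors the helper above: it returns the host:port part of a
// URL-style endpoint, or the input unchanged when there is no scheme.
func getHost(ep string) string {
	u, err := url.Parse(ep)
	if err != nil || !strings.Contains(ep, "://") {
		return ep
	}
	return u.Host
}

func main() {
	fmt.Println(getHost("https://10.0.1.5:2379")) // 10.0.1.5:2379
	fmt.Println(getHost("10.0.1.5:2379"))         // 10.0.1.5:2379 (no scheme, returned as-is)
}
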
@ -15,6 +15,7 @@
package clientv3

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"

@ -27,11 +28,12 @@ import (

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

var (
@ -51,21 +53,22 @@ type Client struct {
	conn     *grpc.ClientConn
	dialerrc chan error

	cfg              Config
	creds            *credentials.TransportCredentials
	balancer         *simpleBalancer
	retryWrapper     retryRpcFunc
	retryAuthWrapper retryRpcFunc
	cfg      Config
	creds    *credentials.TransportCredentials
	balancer *healthBalancer
	mu       *sync.Mutex

	ctx    context.Context
	cancel context.CancelFunc

	// Username is a username for authentication
	// Username is a user name for authentication.
	Username string
	// Password is a password for authentication
	// Password is a password for authentication.
	Password string
	// tokenCred is an instance of WithPerRPCCredentials()'s argument
	tokenCred *authTokenCredential

	callOpts []grpc.CallOption
}

// New creates a new etcdv3 client from a given configuration.
@ -116,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) {

// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
	c.mu.Lock()
	c.cfg.Endpoints = eps
	c.balancer.updateAddrs(eps)
	c.mu.Unlock()
	c.balancer.updateAddrs(eps...)

	// updating notifyCh can trigger new connections,
	// need update addrs if all connections are down
	// or addrs does not include pinAddr.
	c.balancer.mu.RLock()
	update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
	c.balancer.mu.RUnlock()
	if update {
		select {
		case c.balancer.updateAddrsC <- notifyNext:
		case <-c.balancer.stopc:
		}
	}
}

// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
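From the caller's side the new locking and notifyNext plumbing stays invisible; switching a live client to a new member set still reads as follows (a sketch; the endpoints are placeholders and the cluster is assumed reachable):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Point the client at a different member set; if the pinned address
	// is no longer listed, the balancer is told to pick the next one.
	cli.SetEndpoints("127.0.0.1:22379", "127.0.0.1:32379")
}
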
@ -144,8 +162,10 @@ func (c *Client) autoSync() {
		case <-c.ctx.Done():
			return
		case <-time.After(c.cfg.AutoSyncInterval):
			ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
			if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
			err := c.Sync(ctx)
			cancel()
			if err != nil && err != c.ctx.Err() {
				logger.Println("Auto sync endpoints failed:", err)
			}
		}
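The autoSync change fixes a context leak: context.WithTimeout must have its cancel function called even when the call returns early, otherwise the timer lingers until the deadline fires. The rule in isolation (doSync is a hypothetical stand-in for c.Sync):

package main

import (
	"context"
	"fmt"
	"time"
)

// doSync is a hypothetical stand-in for the Sync call above.
func doSync(ctx context.Context) error { return ctx.Err() }

func main() {
	// Wrong: ctx, _ := context.WithTimeout(parent, 5*time.Second)
	// discards cancel and leaks the timer until the deadline expires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	err := doSync(ctx)
	cancel() // release resources immediately, success or not
	fmt.Println(err)
}
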
@ -174,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	host = endpoint
	url, uerr := url.Parse(endpoint)
	if uerr != nil || !strings.Contains(endpoint, "://") {
		return
		return proto, host, scheme
	}
	scheme = url.Scheme

@ -188,7 +208,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	default:
		proto, host = "", ""
	}
	return
	return proto, host, scheme
}

func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {

@ -207,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
	default:
		creds = nil
	}
	return
	return creds
}

// dialSetupOpts gives the dial opts prior to any authentication

@ -215,10 +235,17 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
	if c.cfg.DialTimeout > 0 {
		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
	}
	if c.cfg.DialKeepAliveTime > 0 {
		params := keepalive.ClientParameters{
			Time:    c.cfg.DialKeepAliveTime,
			Timeout: c.cfg.DialKeepAliveTimeout,
		}
		opts = append(opts, grpc.WithKeepaliveParams(params))
	}
	opts = append(opts, dopts...)

	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
		if host == "" && endpoint != "" {
			// dialing an endpoint not in the balancer; use
			// endpoint passed into dial

@ -270,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error {
		endpoint := c.cfg.Endpoints[i]
		host := getHost(endpoint)
		// use dial options without dopts to avoid reusing the client balancer
		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
		if err != nil {
			continue
		}

@ -311,7 +338,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
	if err != nil {
		if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
			if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
				err = grpc.ErrClientConnTimeout
				err = context.DeadlineExceeded
			}
			return nil, err
		}

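dialSetupOpts now translates the two new keepalive Config fields into gRPC dial options. Stated directly against the gRPC API (real types and options of this era; the endpoint is a placeholder):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	params := keepalive.ClientParameters{
		Time:    10 * time.Second, // ping the server after this much idle time
		Timeout: 3 * time.Second,  // close the connection if no ack arrives
	}
	conn, err := grpc.Dial("127.0.0.1:2379",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(params),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
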
@ -360,15 +387,37 @@ func newClient(cfg *Config) (*Client, error) {
		creds:    creds,
		ctx:      ctx,
		cancel:   cancel,
		mu:       new(sync.Mutex),
		callOpts: defaultCallOpts,
	}
	if cfg.Username != "" && cfg.Password != "" {
		client.Username = cfg.Username
		client.Password = cfg.Password
	}
	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
		}
		callOpts := []grpc.CallOption{
			defaultFailFast,
			defaultMaxCallSendMsgSize,
			defaultMaxCallRecvMsgSize,
		}
		if cfg.MaxCallSendMsgSize > 0 {
			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
		}
		if cfg.MaxCallRecvMsgSize > 0 {
			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
		}
		client.callOpts = callOpts
	}

	client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
		return grpcHealthCheck(client, ep)
	})

	client.balancer = newSimpleBalancer(cfg.Endpoints)
	// use Endpoints[0] so that for https:// without any tls config given, then
	// grpc will assume the ServerName is in the endpoint.
	// grpc will assume the certificate server name is the endpoint host.
	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
	if err != nil {
		client.cancel()

@ -376,21 +425,19 @@ func newClient(cfg *Config) (*Client, error) {
		return nil, err
	}
	client.conn = conn
	client.retryWrapper = client.newRetryWrapper()
	client.retryAuthWrapper = client.newAuthRetryWrapper()

	// wait for a connection
	if cfg.DialTimeout > 0 {
		hasConn := false
		waitc := time.After(cfg.DialTimeout)
		select {
		case <-client.balancer.readyc:
		case <-client.balancer.ready():
			hasConn = true
		case <-ctx.Done():
		case <-waitc:
		}
		if !hasConn {
			err := grpc.ErrClientConnTimeout
			err := context.DeadlineExceeded
			select {
			case err = <-client.dialerrc:
			default:

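Through the public API, the new send/receive limit handling in newClient reduces to this (a sketch; the sizes shown are illustrative, and clientv3.New rejects a send limit larger than the receive limit with the fmt.Errorf above):

package main

import (
	"log"
	"math"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"127.0.0.1:2379"},
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 2 * 1024 * 1024, // overrides defaultMaxCallSendMsgSize
		MaxCallRecvMsgSize: math.MaxInt32,   // overrides defaultMaxCallRecvMsgSize
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
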
@ -425,7 +472,7 @@ func (c *Client) checkVersion() (err error) {
	errc := make(chan error, len(c.cfg.Endpoints))
	ctx, cancel := context.WithCancel(c.ctx)
	if c.cfg.DialTimeout > 0 {
		ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
		ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
	}
	wg.Add(len(c.cfg.Endpoints))
	for _, ep := range c.cfg.Endpoints {

@ -440,7 +487,7 @@ func (c *Client) checkVersion() (err error) {
		vs := strings.Split(resp.Version, ".")
		maj, min := 0, 0
		if len(vs) >= 2 {
			maj, rerr = strconv.Atoi(vs[0])
			maj, _ = strconv.Atoi(vs[0])
			min, rerr = strconv.Atoi(vs[1])
		}
		if maj < 3 || (maj == 3 && min < 2) {

@ -472,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool {
	if err == nil {
		return false
	}
	code := grpc.Code(err)
	ev, _ := status.FromError(err)
	// Unavailable codes mean the system will be right back.
	// (e.g., can't connect, lost leader)
	// Treat Internal codes as if something failed, leaving the
	// system in an inconsistent state, but retrying could make progress.
	// (e.g., failed in middle of send, corrupted frame)
	// TODO: are permanent Internal errors possible from grpc?
	return code != codes.Unavailable && code != codes.Internal
	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
}

func toErr(ctx context.Context, err error) error {

@ -490,7 +537,8 @@ func toErr(ctx context.Context, err error) error {
	if _, ok := err.(rpctypes.EtcdError); ok {
		return err
	}
	code := grpc.Code(err)
	ev, _ := status.FromError(err)
	code := ev.Code()
	switch code {
	case codes.DeadlineExceeded:
		fallthrough

@ -499,7 +547,6 @@ func toErr(ctx context.Context, err error) error {
			err = ctx.Err()
		}
	case codes.Unavailable:
		err = ErrNoAvailableEndpoints
	case codes.FailedPrecondition:
		err = grpc.ErrClientConnClosing
	}

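isHaltErr and toErr now classify errors through the status package instead of the older grpc.Code helper. The inspection idiom on its own (a runnable sketch using only released gRPC APIs):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.Unavailable, "etcdserver: leader changed")
	if ev, ok := status.FromError(err); ok {
		// Unavailable and Internal are the codes treated as retryable above.
		fmt.Println(ev.Code() == codes.Unavailable) // true
	}
}
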
@ -15,8 +15,10 @@
package clientv3

import (
	"context"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"

	"google.golang.org/grpc"
)

@ -43,20 +45,29 @@ type Cluster interface {
}

type cluster struct {
	remote pb.ClusterClient
	remote   pb.ClusterClient
	callOpts []grpc.CallOption
}

func NewCluster(c *Client) Cluster {
	return &cluster{remote: RetryClusterClient(c)}
	api := &cluster{remote: RetryClusterClient(c)}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api
}

func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
	return &cluster{remote: remote}
func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
	api := &cluster{remote: remote}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api
}

func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
	resp, err := c.remote.MemberAdd(ctx, r)
	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
	if err != nil {
		return nil, toErr(ctx, err)
	}

@ -65,7 +76,7 @@ func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAdd

func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
	r := &pb.MemberRemoveRequest{ID: id}
	resp, err := c.remote.MemberRemove(ctx, r)
	resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
	if err != nil {
		return nil, toErr(ctx, err)
	}

@ -74,27 +85,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes

func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
	if err == nil {
		return (*MemberUpdateResponse)(resp), nil
	}
	return nil, toErr(ctx, err)
}

func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
	// it is safe to retry on list.
	for {
		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
		if err == nil {
			return (*MemberListResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
	if err == nil {
		return (*MemberListResponse)(resp), nil
	}
	return nil, toErr(ctx, err)
}

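With the retry loops gone from MemberUpdate and MemberList (retries now live in the RetryClusterClient wrapper), caller code stays a single call; a sketch assuming a reachable cluster:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	resp, err := cli.MemberList(ctx)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.Name, m.ClientURLs)
	}
}
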
@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}

// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
// WithCompactPhysical makes Compact wait until all compacted entries are
// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
	return func(op *CompactOp) { op.physical = true }
}

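A usage sketch for the rewritten comment; imports as in the MemberList example above, and the revision number is illustrative:

// compactTo asks the server to compact up to rev and waits until the
// compacted entries are physically removed (hypothetical helper).
func compactTo(ctx context.Context, cli *clientv3.Client, rev int64) error {
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}
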
@ -60,6 +60,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
	case pb.Compare_MOD:
		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
	case pb.Compare_LEASE:
		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
	default:
		panic("Unknown compare type")
	}

@ -82,6 +84,12 @@ func ModRevision(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}

// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
// LeaseID is 0, otherwise known as `NoLease`.
func LeaseValue(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
}

// KeyBytes returns the byte slice holding the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }

@ -111,6 +119,7 @@ func (cmp Cmp) WithPrefix() Cmp {
	return cmp
}

// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
func mustInt64(val interface{}) int64 {
	if v, ok := val.(int64); ok {
		return v

@ -120,3 +129,12 @@ func mustInt64(val interface{}) int64 {
	}
	panic("bad value")
}

// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
// int64 otherwise.
func mustInt64orLeaseID(val interface{}) int64 {
	if v, ok := val.(LeaseID); ok {
		return int64(v)
	}
	return mustInt64(val)
}

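The new Compare_LEASE target and LeaseValue helper enable transactions that branch on a key's lease. A sketch (key and value are illustrative; clientv3.NoLease is the zero LeaseID):

// putIfUnleased writes key only when it is not attached to any lease,
// using the new LeaseValue comparison (hypothetical helper).
func putIfUnleased(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.LeaseValue(key), "=", clientv3.NoLease)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
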
@ -15,10 +15,10 @@
package clientv3

import (
	"context"
	"crypto/tls"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

@ -33,10 +33,31 @@ type Config struct {
	// DialTimeout is the timeout for failing to establish a connection.
	DialTimeout time.Duration `json:"dial-timeout"`

	// DialKeepAliveTime is the time after which client pings the server to see if
	// transport is alive.
	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`

	// DialKeepAliveTimeout is the time that the client waits for a response for the
	// keep-alive probe. If the response is not received in this time, the connection is closed.
	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`

	// MaxCallSendMsgSize is the client-side request send limit in bytes.
	// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
	// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
	MaxCallSendMsgSize int

	// MaxCallRecvMsgSize is the client-side response receive limit.
	// If 0, it defaults to "math.MaxInt32", because range response can
	// easily exceed request send limits.
	// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
	MaxCallRecvMsgSize int

	// TLS holds the client secure credentials, if any.
	TLS *tls.Config

	// Username is a username for authentication.
	// Username is a user name for authentication.
	Username string `json:"username"`

	// Password is a password for authentication.

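A configuration sketch exercising the new keepalive fields (all durations are illustrative; a zero value leaves the corresponding feature disabled or at its default):

cfg := clientv3.Config{
	Endpoints:            []string{"127.0.0.1:2379"},
	DialTimeout:          5 * time.Second,
	DialKeepAliveTime:    30 * time.Second, // ping an idle connection after 30s
	DialKeepAliveTimeout: 10 * time.Second, // then give up on it after 10s more
}
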
@ -16,6 +16,22 @@
//
// Create client using `clientv3.New`:
//
//	// expect dial time-out on ipv4 blackhole
//	_, err := clientv3.New(clientv3.Config{
//		Endpoints:   []string{"http://254.0.0.1:12345"},
//		DialTimeout: 2 * time.Second
//	})
//
//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
//	if err == context.DeadlineExceeded {
//		// handle errors
//	}
//
//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
//	if err == grpc.ErrClientConnTimeout {
//		// handle errors
//	}
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
//		DialTimeout: 5 * time.Second,

@ -28,7 +44,7 @@
// Make sure to close the client after using it. If the client is not closed, the
// connection will have leaky goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
// To specify a client request timeout, wrap the context with context.WithTimeout:
//
//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")

@ -41,10 +57,11 @@
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// etcd client returns 2 types of errors:
// etcd client returns 3 types of errors:
//
//	1. context error: canceled or deadline exceeded.
//	2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//	1. context error: canceled or deadline exceeded.
//	2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
//	3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//

@ -54,6 +71,12 @@
//		// ctx is canceled by another routine
//	} else if err == context.DeadlineExceeded {
//		// ctx is attached with a deadline and it exceeded
//	} else if ev, ok := status.FromError(err); ok {
//		code := ev.Code()
//		if code == codes.DeadlineExceeded {
//			// server-side context might have timed-out first (due to clock skew)
//			// while original client-side context is not timed-out yet
//		}
//	} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
//		// process (verr.Errors)
//	} else {

@ -61,4 +84,14 @@
//	}
//	}
//
//	go func() { cli.Close() }()
//	_, err := kvc.Get(ctx, "a")
//	if err != nil {
//		if err == context.Canceled {
//			// grpc balancer calls 'Get' with an inflight client.Close
//		} else if err == grpc.ErrClientConnClosing {
//			// grpc balancer calls 'Get' after client.Close.
//		}
//	}
//
package clientv3

@ -0,0 +1,46 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"math"

	"google.golang.org/grpc"
)

var (
	// Disable gRPC internal retry logic
	// TODO: enable when gRPC retry is stable (FailFast=false)
	// Reference:
	//  - https://github.com/grpc/grpc-go/issues/1532
	//  - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
	defaultFailFast = grpc.FailFast(true)

	// client-side request send limit, gRPC default is math.MaxInt32
	// Make sure that "client-side send limit < server-side default send/recv limit"
	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)

	// client-side response receive limit, gRPC default is 4MB
	// Make sure that "client-side receive limit >= server-side default send/recv limit"
	// because range response can easily exceed request send limits
	// Default to math.MaxInt32; writes exceeding server-side send limit fail anyway
	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
)

// defaultCallOpts defines a list of default "gRPC.CallOption".
// Some options are exposed to "clientv3.Config".
// Defaults will be overridden by the settings in "clientv3.Config".
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}

@ -0,0 +1,609 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"context"
	"errors"
	"net/url"
	"strings"
	"sync"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/status"
)

const (
	minHealthRetryDuration = 3 * time.Second
	unknownService         = "unknown service grpc.health.v1.Health"
)

// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")

type healthCheckFunc func(ep string) (bool, error)

type notifyMsg int

const (
	notifyReset notifyMsg = iota
	notifyNext
)

// healthBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type healthBalancer struct {
	// addrs are the client's endpoint addresses for grpc
	addrs []grpc.Address

	// eps holds the raw endpoints from the client
	eps []string

	// notifyCh notifies grpc of the set of addresses for connecting
	notifyCh chan []grpc.Address

	// readyc closes once the first connection is up
	readyc    chan struct{}
	readyOnce sync.Once

	// healthCheck checks an endpoint's health.
	healthCheck        healthCheckFunc
	healthCheckTimeout time.Duration

	unhealthyMu        sync.RWMutex
	unhealthyHostPorts map[string]time.Time

	// mu protects all fields below.
	mu sync.RWMutex

	// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
	upc chan struct{}

	// downc closes when grpc calls down() on pinAddr
	downc chan struct{}

	// stopc is closed to signal updateNotifyLoop should stop.
	stopc    chan struct{}
	stopOnce sync.Once
	wg       sync.WaitGroup

	// donec closes when all goroutines are exited
	donec chan struct{}

	// updateAddrsC notifies updateNotifyLoop to update addrs.
	updateAddrsC chan notifyMsg

	// grpc issues TLS cert checks using the string passed into dial so
	// that string must be the host. To recover the full scheme://host URL,
	// have a map from hosts to the original endpoint.
	hostPort2ep map[string]string

	// pinAddr is the currently pinned address; set to the empty string on
	// initialization and shutdown.
	pinAddr string

	closed bool
}

func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
	notifyCh := make(chan []grpc.Address)
	addrs := eps2addrs(eps)
	hb := &healthBalancer{
		addrs:              addrs,
		eps:                eps,
		notifyCh:           notifyCh,
		readyc:             make(chan struct{}),
		healthCheck:        hc,
		unhealthyHostPorts: make(map[string]time.Time),
		upc:                make(chan struct{}),
		stopc:              make(chan struct{}),
		downc:              make(chan struct{}),
		donec:              make(chan struct{}),
		updateAddrsC:       make(chan notifyMsg),
		hostPort2ep:        getHostPort2ep(eps),
	}
	if timeout < minHealthRetryDuration {
		timeout = minHealthRetryDuration
	}
	hb.healthCheckTimeout = timeout

	close(hb.downc)
	go hb.updateNotifyLoop()
	hb.wg.Add(1)
	go func() {
		defer hb.wg.Done()
		hb.updateUnhealthy()
	}()
	return hb
}

func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }

func (b *healthBalancer) ConnectNotify() <-chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.upc
}

func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }

func (b *healthBalancer) endpoint(hostPort string) string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.hostPort2ep[hostPort]
}

func (b *healthBalancer) pinned() string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.pinAddr
}

func (b *healthBalancer) hostPortError(hostPort string, err error) {
	if b.endpoint(hostPort) == "" {
		logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
		return
	}

	b.unhealthyMu.Lock()
	b.unhealthyHostPorts[hostPort] = time.Now()
	b.unhealthyMu.Unlock()
	logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
}

func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
	if b.endpoint(hostPort) == "" {
		logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
		return
	}

	b.unhealthyMu.Lock()
	delete(b.unhealthyHostPorts, hostPort)
	b.unhealthyMu.Unlock()
	logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
}

func (b *healthBalancer) countUnhealthy() (count int) {
	b.unhealthyMu.RLock()
	count = len(b.unhealthyHostPorts)
	b.unhealthyMu.RUnlock()
	return count
}

func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
	b.unhealthyMu.RLock()
	_, unhealthy = b.unhealthyHostPorts[hostPort]
	b.unhealthyMu.RUnlock()
	return unhealthy
}

func (b *healthBalancer) cleanupUnhealthy() {
	b.unhealthyMu.Lock()
	for k, v := range b.unhealthyHostPorts {
		if time.Since(v) > b.healthCheckTimeout {
			delete(b.unhealthyHostPorts, k)
			logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
		}
	}
	b.unhealthyMu.Unlock()
}

func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
	unhealthyCnt := b.countUnhealthy()

	b.mu.RLock()
	defer b.mu.RUnlock()

	hbAddrs := b.addrs
	if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
		liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
		for k := range b.hostPort2ep {
			liveHostPorts[k] = struct{}{}
		}
		return hbAddrs, liveHostPorts
	}

	addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
	liveHostPorts := make(map[string]struct{}, len(addrs))
	for _, addr := range b.addrs {
		if !b.isUnhealthy(addr.Addr) {
			addrs = append(addrs, addr)
			liveHostPorts[addr.Addr] = struct{}{}
		}
	}
	return addrs, liveHostPorts
}

func (b *healthBalancer) updateUnhealthy() {
	for {
		select {
		case <-time.After(b.healthCheckTimeout):
			b.cleanupUnhealthy()
			pinned := b.pinned()
			if pinned == "" || b.isUnhealthy(pinned) {
				select {
				case b.updateAddrsC <- notifyNext:
				case <-b.stopc:
					return
				}
			}
		case <-b.stopc:
			return
		}
	}
}

func (b *healthBalancer) updateAddrs(eps ...string) {
	np := getHostPort2ep(eps)

	b.mu.Lock()
	defer b.mu.Unlock()

	match := len(np) == len(b.hostPort2ep)
	if match {
		for k, v := range np {
			if b.hostPort2ep[k] != v {
				match = false
				break
			}
		}
	}
	if match {
		// same endpoints, so no need to update address
		return
	}

	b.hostPort2ep = np
	b.addrs, b.eps = eps2addrs(eps), eps

	b.unhealthyMu.Lock()
	b.unhealthyHostPorts = make(map[string]time.Time)
	b.unhealthyMu.Unlock()
}

func (b *healthBalancer) next() {
	b.mu.RLock()
	downc := b.downc
	b.mu.RUnlock()
	select {
	case b.updateAddrsC <- notifyNext:
	case <-b.stopc:
	}
	// wait until disconnect so new RPCs are not issued on old connection
	select {
	case <-downc:
	case <-b.stopc:
	}
}

func (b *healthBalancer) updateNotifyLoop() {
	defer close(b.donec)

	for {
		b.mu.RLock()
		upc, downc, addr := b.upc, b.downc, b.pinAddr
		b.mu.RUnlock()
		// downc or upc should be closed
		select {
		case <-downc:
			downc = nil
		default:
		}
		select {
		case <-upc:
			upc = nil
		default:
		}
		switch {
		case downc == nil && upc == nil:
			// stale
			select {
			case <-b.stopc:
				return
			default:
			}
		case downc == nil:
			b.notifyAddrs(notifyReset)
			select {
			case <-upc:
			case msg := <-b.updateAddrsC:
				b.notifyAddrs(msg)
			case <-b.stopc:
				return
			}
		case upc == nil:
			select {
			// close connections that are not the pinned address
			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
			case <-downc:
			case <-b.stopc:
				return
			}
			select {
			case <-downc:
				b.notifyAddrs(notifyReset)
			case msg := <-b.updateAddrsC:
				b.notifyAddrs(msg)
			case <-b.stopc:
				return
			}
		}
	}
}

func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
	if msg == notifyNext {
		select {
		case b.notifyCh <- []grpc.Address{}:
		case <-b.stopc:
			return
		}
	}
	b.mu.RLock()
	pinAddr := b.pinAddr
	downc := b.downc
	b.mu.RUnlock()
	addrs, hostPorts := b.liveAddrs()

	var waitDown bool
	if pinAddr != "" {
		_, ok := hostPorts[pinAddr]
		waitDown = !ok
	}

	select {
	case b.notifyCh <- addrs:
		if waitDown {
			select {
			case <-downc:
			case <-b.stopc:
			}
		}
	case <-b.stopc:
	}
}

func (b *healthBalancer) Up(addr grpc.Address) func(error) {
	if !b.mayPin(addr) {
		return func(err error) {}
	}

	b.mu.Lock()
	defer b.mu.Unlock()

	// gRPC might call Up after it called Close. We add this check
	// to "fix" it up at application layer. Otherwise, it will panic
	// if b.upc is already closed.
	if b.closed {
		return func(err error) {}
	}

	// gRPC might call Up on a stale address.
	// Prevent updating pinAddr with a stale address.
	if !hasAddr(b.addrs, addr.Addr) {
		return func(err error) {}
	}

	if b.pinAddr != "" {
		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
		return func(err error) {}
	}

	// notify waiting Get()s and pin first connected address
	close(b.upc)
	b.downc = make(chan struct{})
	b.pinAddr = addr.Addr
	logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)

	// notify client that a connection is up
	b.readyOnce.Do(func() { close(b.readyc) })

	return func(err error) {
		// If connected to a black hole endpoint or a killed server, the gRPC ping
		// timeout will induce a network I/O error, and gRPC retries until success;
		// finding a healthy endpoint on retry could take several timeouts and redials.
		// To avoid wasting retries, gray-list unhealthy endpoints.
		b.hostPortError(addr.Addr, err)

		b.mu.Lock()
		b.upc = make(chan struct{})
		close(b.downc)
		b.pinAddr = ""
		b.mu.Unlock()
		logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
	}
}

func (b *healthBalancer) mayPin(addr grpc.Address) bool {
	if b.endpoint(addr.Addr) == "" { // stale host:port
		return false
	}

	b.unhealthyMu.RLock()
	unhealthyCnt := len(b.unhealthyHostPorts)
	failedTime, bad := b.unhealthyHostPorts[addr.Addr]
	b.unhealthyMu.RUnlock()

	b.mu.RLock()
	skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
	b.mu.RUnlock()
	if skip || !bad {
		return true
	}

	// prevent isolated member's endpoint from being infinitely retried, as follows:
	// 1. keepalive pings detect GoAway with http2.ErrCodeEnhanceYourCalm
	// 2. balancer 'Up' unpins with grpc: failed with network I/O error
	// 3. grpc-healthcheck still SERVING, thus retry to pin
	// instead, return before grpc-healthcheck if failed within healthcheck timeout
	if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
		logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
		return false
	}

	if ok, _ := b.healthCheck(addr.Addr); ok {
		b.removeUnhealthy(addr.Addr, "health check success")
		return true
	}

	b.hostPortError(addr.Addr, errors.New("health check failed"))
	return false
}

func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
	var (
		addr   string
		closed bool
	)

	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
	// an address it has notified via Notify immediately instead of blocking.
	if !opts.BlockingWait {
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr == "" {
			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
		}
		return grpc.Address{Addr: addr}, func() {}, nil
	}

	for {
		b.mu.RLock()
		ch := b.upc
		b.mu.RUnlock()
		select {
		case <-ch:
		case <-b.donec:
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		case <-ctx.Done():
			return grpc.Address{Addr: ""}, nil, ctx.Err()
		}
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr != "" {
			break
		}
	}
	return grpc.Address{Addr: addr}, func() {}, nil
}

func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }

func (b *healthBalancer) Close() error {
	b.mu.Lock()
	// In case gRPC calls close twice. TODO: remove the checking
	// when we are sure that gRPC won't call close twice.
	if b.closed {
		b.mu.Unlock()
		<-b.donec
		return nil
	}
	b.closed = true
	b.stopOnce.Do(func() { close(b.stopc) })
	b.pinAddr = ""

	// In the case of following scenario:
	// 1. upc is not closed; no pinned address
	// 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
	// 3. client.conn.Close() calls balancer.Close(); closed = true
	// 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
	// we must close upc so Get() exits from blocking on upc
	select {
	case <-b.upc:
	default:
		// terminate all waiting Get()s
		close(b.upc)
	}

	b.mu.Unlock()
	b.wg.Wait()

	// wait for updateNotifyLoop to finish
	<-b.donec
	close(b.notifyCh)

	return nil
}

func grpcHealthCheck(client *Client, ep string) (bool, error) {
	conn, err := client.dial(ep)
	if err != nil {
		return false, err
	}
	defer conn.Close()
	cli := healthpb.NewHealthClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
	cancel()
	if err != nil {
		if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
			if s.Message() == unknownService { // etcd < v3.3.0
				return true, nil
			}
		}
		return false, err
	}
	return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}

func hasAddr(addrs []grpc.Address, targetAddr string) bool {
	for _, addr := range addrs {
		if targetAddr == addr.Addr {
			return true
		}
	}
	return false
}

func getHost(ep string) string {
	url, uerr := url.Parse(ep)
	if uerr != nil || !strings.Contains(ep, "://") {
		return ep
	}
	return url.Host
}

func eps2addrs(eps []string) []grpc.Address {
	addrs := make([]grpc.Address, len(eps))
	for i := range eps {
		addrs[i].Addr = getHost(eps[i])
	}
	return addrs
}

func getHostPort2ep(eps []string) map[string]string {
	hm := make(map[string]string, len(eps))
	for i := range eps {
		_, host, _ := parseEndpoint(eps[i])
		hm[host] = eps[i]
	}
	return hm
}

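grpcHealthCheck above is plain gRPC health checking; the same probe can be issued by hand (a runnable sketch; the endpoint is a placeholder, and an empty Service name asks for overall server health):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// The same request the balancer issues in grpcHealthCheck above.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status == healthpb.HealthCheckResponse_SERVING)
}
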
@ -15,8 +15,10 @@
package clientv3

import (
	"context"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"

	"google.golang.org/grpc"
)

@ -74,16 +76,38 @@ func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn }

func (resp *PutResponse) OpResponse() OpResponse {
	return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
	return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
	return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
	return OpResponse{txn: resp}
}

type kv struct {
	remote pb.KVClient
	remote   pb.KVClient
	callOpts []grpc.CallOption
}

func NewKV(c *Client) KV {
	return &kv{remote: RetryKVClient(c)}
	api := &kv{remote: RetryKVClient(c)}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api
}

func NewKVFromKVClient(remote pb.KVClient) KV {
	return &kv{remote: remote}
func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
	api := &kv{remote: remote}
	if c != nil {
		api.callOpts = c.callOpts
	}
	return api
}

func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {

@ -102,7 +126,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete
}

func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
	if err != nil {
		return nil, toErr(ctx, err)
	}

@ -111,59 +135,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C

func (kv *kv) Txn(ctx context.Context) Txn {
	return &txn{
		kv:  kv,
		ctx: ctx,
		kv:       kv,
		ctx:      ctx,
		callOpts: kv.callOpts,
	}
}

func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
	for {
		resp, err := kv.do(ctx, op)
		if err == nil {
			return resp, nil
		}

		if isHaltErr(ctx, err) {
			return resp, toErr(ctx, err)
		}
		// do not retry on modifications
		if op.isWrite() {
			return resp, toErr(ctx, err)
		}
	}
}

func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
	var err error
	switch op.t {
	case tRange:
		var resp *pb.RangeResponse
		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
		if err == nil {
			return OpResponse{get: (*GetResponse)(resp)}, nil
		}
	case tPut:
		var resp *pb.PutResponse
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
		resp, err = kv.remote.Put(ctx, r)
		resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
		if err == nil {
			return OpResponse{put: (*PutResponse)(resp)}, nil
		}
	case tDeleteRange:
		var resp *pb.DeleteRangeResponse
		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
		resp, err = kv.remote.DeleteRange(ctx, r)
		resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
		if err == nil {
			return OpResponse{del: (*DeleteResponse)(resp)}, nil
		}
	case tTxn:
		var resp *pb.TxnResponse
		resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
		resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
		if err == nil {
			return OpResponse{txn: (*TxnResponse)(resp)}, nil
		}
	default:
		panic("Unknown op")
	}
	return OpResponse{}, err
	return OpResponse{}, toErr(ctx, err)
}

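A short sketch of the kv path from the caller's side (the key name is illustrative; cli is a *clientv3.Client as constructed in the earlier examples):

// roundTrip exercises the kv path above: one Put and one Get, each
// carrying the client-wide call options (hypothetical helper).
func roundTrip(ctx context.Context, cli *clientv3.Client) (string, error) {
	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		return "", err
	}
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		return "", err
	}
	if len(resp.Kvs) == 0 {
		return "", nil
	}
	return string(resp.Kvs[0].Value), nil
}
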
@ -15,12 +15,13 @@
package clientv3

import (
	"context"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

@ -30,7 +31,7 @@ type (
	LeaseID int64
)

// LeaseGrantResponse is used to convert the protobuf grant response.
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	ID LeaseID

@ -38,19 +39,19 @@ type LeaseGrantResponse struct {
	Error string
}

// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	ID  LeaseID
	TTL int64
}

// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.

@ -60,6 +61,18 @@ type LeaseTimeToLiveResponse struct {
	Keys [][]byte `json:"keys"`
}

// LeaseStatus represents a lease status.
type LeaseStatus struct {
	ID LeaseID `json:"id"`
	// TODO: TTL int64
}

// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
	*pb.ResponseHeader
	Leases []LeaseStatus `json:"leases"`
}

const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.

@ -98,11 +111,32 @@ type Lease interface {
	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// KeepAlive keeps the given lease alive forever.
	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)

	// KeepAlive keeps the given lease alive forever. If the keepalive response
	// posted to the channel is not consumed immediately, the lease client will
	// continue sending keep alive requests to the etcd server at least every
	// second until the latest response is consumed.
	//
	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
	// alive stream is interrupted in some way the client cannot handle itself, or if
	// the given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
	// from this closed channel is nil.
	//
	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver:
	// no leader") or is canceled by the caller (e.g. context.Canceled), the error
	// is returned. Otherwise, it retries.
	//
	// TODO(v4.0): post errors to last keep alive message before closing
	// (see https://github.com/coreos/etcd/pull/7866)
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
	// should be used instead of KeepAliveOnce.
|
||||
// KeepAliveOnce renews the lease once. The response corresponds to the
|
||||
// first message from calling KeepAlive. If the response has a recoverable
|
||||
// error, KeepAliveOnce will retry the RPC with a new keep alive message.
|
||||
//
|
||||
// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
|
||||
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
|
||||
|
||||
// Close releases all resources Lease keeps for efficient communication
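
The KeepAlive contract documented above requires the caller to keep draining the response channel. A minimal sketch, assuming a client cli built as in the previous example:

// grant a 10-second lease and attach a key to it
lresp, err := cli.Grant(context.TODO(), 10)
if err != nil {
    panic(err)
}
if _, err = cli.Put(context.TODO(), "leased_key", "v", clientv3.WithLease(lresp.ID)); err != nil {
    panic(err)
}
// consume keepalive responses; if this loop stalls, the lease client
// keeps renewing on its own at least once a second
ch, kaerr := cli.KeepAlive(context.TODO(), lresp.ID)
if kaerr != nil {
    panic(kaerr)
}
for ka := range ch { // the loop ends when the channel closes
    fmt.Println("remaining ttl:", ka.TTL)
}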

@ -133,6 +167,8 @@ type lessor struct {

// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
firstKeepAliveOnce sync.Once

callOpts []grpc.CallOption
}

// keepAlive multiplexes a keepalive for a lease over multiple channels

@ -148,10 +184,10 @@ type keepAlive struct {
}

func NewLease(c *Client) Lease {
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}

func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),

@ -161,62 +197,64 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
if c != nil {
l.callOpts = c.callOpts
}
reqLeaderCtx := WithRequireLeader(context.Background())
l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
return l
}

func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}

func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r)

if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
return nil, toErr(ctx, err)
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
for {
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
r := toLeaseTimeToLiveRequest(id, opts...)
resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
if err == nil {
gresp := &LeaseTimeToLiveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
GrantedTTL: resp.GrantedTTL,
Keys: resp.Keys,
}
return gresp, nil
}
return nil, toErr(ctx, err)
}

func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
if err == nil {
leases := make([]LeaseStatus, len(resp.Leases))
for i := range resp.Leases {
leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
}
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
}
return nil, toErr(ctx, err)
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {

@ -314,7 +352,7 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha
}
}

// closeRequireLeader scans all keep alives for ctxs that have require leader
// closeRequireLeader scans keepAlives for ctxs that have require leader
// and closes the associated channels.
func (l *lessor) closeRequireLeader() {
l.mu.Lock()

@ -357,7 +395,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive
cctx, cancel := context.WithCancel(ctx)
defer cancel()

stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}

@ -401,7 +439,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
} else {
for {
resp, err := stream.Recv()

if err != nil {
if canceledByCaller(l.stopCtx, err) {
return err

@ -426,10 +463,10 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
}
}

// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
if err != nil {
cancel()
return nil, err

@ -505,7 +542,7 @@ func (l *lessor) deadlineLoop() {
}
}

// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
var tosend []LeaseID

@ -16,67 +16,120 @@ package clientv3

import (
"io/ioutil"
"log"
"sync"

"google.golang.org/grpc/grpclog"
)

// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger
// It implements grpclog.LoggerV2 interface.
type Logger interface {
grpclog.LoggerV2

// Lvl returns logger if logger's verbosity level >= "lvl".
// Otherwise, logger that discards all logs.
Lvl(lvl int) Logger

// to satisfy capnslog

Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
}

var (
logger settableLogger
loggerMu sync.RWMutex
logger Logger
)

type settableLogger struct {
l grpclog.Logger
l grpclog.LoggerV2
mu sync.RWMutex
}

func init() {
// disable client side logs by default
logger.mu.Lock()
logger.l = log.New(ioutil.Discard, "", 0)

// logger has to override the grpclog at initialization so that
// any changes to the grpclog go through logger with locking
// instead of through SetLogger
//
// now updates only happen through settableLogger.set
grpclog.SetLogger(&logger)
logger.mu.Unlock()
logger = &settableLogger{}
SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}

// SetLogger sets client-side Logger. By default, logs are disabled.
func SetLogger(l Logger) {
logger.set(l)
// SetLogger sets client-side Logger.
func SetLogger(l grpclog.LoggerV2) {
loggerMu.Lock()
logger = NewLogger(l)
// override grpclog so that any changes happen with locking
grpclog.SetLoggerV2(logger)
loggerMu.Unlock()
}

// GetLogger returns the current logger.
func GetLogger() Logger {
return logger.get()
loggerMu.RLock()
l := logger
loggerMu.RUnlock()
return l
}

func (s *settableLogger) set(l Logger) {
s.mu.Lock()
logger.l = l
s.mu.Unlock()
// NewLogger returns a new Logger with grpclog.LoggerV2.
func NewLogger(gl grpclog.LoggerV2) Logger {
return &settableLogger{l: gl}
}

func (s *settableLogger) get() Logger {
func (s *settableLogger) get() grpclog.LoggerV2 {
s.mu.RLock()
l := logger.l
l := s.l
s.mu.RUnlock()
return l
}

// implement the grpclog.Logger interface
// implement the grpclog.LoggerV2 interface

func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
func (s *settableLogger) Warningf(format string, args ...interface{}) {
s.get().Warningf(format, args...)
}
func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
func (s *settableLogger) Errorf(format string, args ...interface{}) {
s.get().Errorf(format, args...)
}
func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
func (s *settableLogger) Lvl(lvl int) Logger {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
if l.V(lvl) {
return s
}
return &noLogger{}
}

type noLogger struct{}

func (*noLogger) Info(args ...interface{}) {}
func (*noLogger) Infof(format string, args ...interface{}) {}
func (*noLogger) Infoln(args ...interface{}) {}
func (*noLogger) Warning(args ...interface{}) {}
func (*noLogger) Warningf(format string, args ...interface{}) {}
func (*noLogger) Warningln(args ...interface{}) {}
func (*noLogger) Error(args ...interface{}) {}
func (*noLogger) Errorf(format string, args ...interface{}) {}
func (*noLogger) Errorln(args ...interface{}) {}
func (*noLogger) Fatal(args ...interface{}) {}
func (*noLogger) Fatalf(format string, args ...interface{}) {}
func (*noLogger) Fatalln(args ...interface{}) {}
func (*noLogger) Print(args ...interface{}) {}
func (*noLogger) Printf(format string, args ...interface{}) {}
func (*noLogger) Println(args ...interface{}) {}
func (*noLogger) V(l int) bool { return false }
func (ng *noLogger) Lvl(lvl int) Logger { return ng }
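
With the move to grpclog.LoggerV2 above, client logging is configured through SetLogger and discarded by default. A small sketch routing info/warning/error output to stderr:

import (
    "os"

    "github.com/coreos/etcd/clientv3"
    "google.golang.org/grpc/grpclog"
)

func init() {
    // logs are discarded by default; send them to stderr instead
    clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
}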

@ -15,11 +15,11 @@
package clientv3

import (
"context"
"io"

pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

"golang.org/x/net/context"
"google.golang.org/grpc"
)

@ -28,6 +28,8 @@ type (
AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse
HashKVResponse pb.HashKVResponse
MoveLeaderResponse pb.MoveLeaderResponse
)

type Maintenance interface {

@ -37,7 +39,7 @@ type Maintenance interface {
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)

// Defragment defragments storage backend of the etcd member with given endpoint.
// Defragment releases wasted space from internal fragmentation on a given etcd member.
// Defragment is only needed when deleting a large number of keys and want to reclaim
// the resources.
// Defragment is an expensive operation. User should avoid defragmenting multiple members

@ -49,36 +51,54 @@ type Maintenance interface {
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)

// Snapshot provides a reader for a snapshot of a backend.
// HashKV returns a hash of the KV state at the time of the RPC.
// If revision is zero, the hash is computed on all keys. If the revision
// is non-zero, the hash is computed on all keys at or below the given revision.
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)

// Snapshot provides a reader for a point-in-time snapshot of etcd.
Snapshot(ctx context.Context) (io.ReadCloser, error)

// MoveLeader requests current leader to transfer its leadership to the transferee.
// Request must be made to the leader.
MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
}

type maintenance struct {
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
callOpts []grpc.CallOption
}

func NewMaintenance(c *Client) Maintenance {
return &maintenance{
api := &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.dial(endpoint)
if err != nil {
return nil, nil, err
}
cancel := func() { conn.Close() }
return pb.NewMaintenanceClient(conn), cancel, nil
return RetryMaintenanceClient(c, conn), cancel, nil
},
remote: pb.NewMaintenanceClient(c.conn),
remote: RetryMaintenanceClient(c, c.conn),
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}

func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
return &maintenance{
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
remote: remote,
}
if c != nil {
api.callOpts = c.callOpts
}
return api
}

func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {

@ -87,15 +107,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}

func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {

@ -121,7 +137,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR
return &ret, nil
}

resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
if err == nil {
return (*AlarmResponse)(resp), nil
}

@ -134,7 +150,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}

@ -147,15 +163,28 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*StatusResponse)(resp), nil
}

func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}
return (*HashKVResponse)(resp), nil
}

func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
if err != nil {
return nil, toErr(ctx, err)
}

@ -178,5 +207,20 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
}
pw.Close()
}()
return pr, nil
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}

type snapshotReadCloser struct {
ctx context.Context
io.ReadCloser
}

func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
n, err = rc.ReadCloser.Read(p)
return n, toErr(rc.ctx, err)
}

func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
return (*MoveLeaderResponse)(resp), toErr(ctx, err)
}
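
Maintenance RPCs dial a specific endpoint, so Status and the new v3.3 HashKV are naturally issued per member. A hedged sketch iterating cli.Endpoints(), with cli as in the earlier example (the loop itself is illustrative; field names follow the pb responses above):

for _, ep := range cli.Endpoints() {
    st, err := cli.Status(context.TODO(), ep)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s: leader=%x revision=%d\n", ep, st.Leader, st.Header.Revision)

    // HashKV is new in v3.3: hash of the KV state at or below a revision (0 = all keys)
    h, err := cli.HashKV(context.TODO(), ep, 0)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s: hash=%d compact_revision=%d\n", ep, h.Hash, h.CompactRevision)
}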

@ -75,7 +75,7 @@ type Op struct {
elseOps []Op
}

// accesors / mutators
// accessors / mutators

func (op Op) IsTxn() bool { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }

@ -89,6 +89,39 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding with the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end }

// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }

// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }

// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }

// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }

// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable == true }

// IsKeysOnly returns whether keysOnly is set.
func (op Op) IsKeysOnly() bool { return op.keysOnly == true }

// IsCountOnly returns whether countOnly is set.
func (op Op) IsCountOnly() bool { return op.countOnly == true }

// MinModRev returns the operation's minimum modify revision.
func (op Op) MinModRev() int64 { return op.minModRev }

// MaxModRev returns the operation's maximum modify revision.
func (op Op) MaxModRev() int64 { return op.maxModRev }

// MinCreateRev returns the operation's minimum create revision.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }

// MaxCreateRev returns the operation's maximum create revision.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }

// WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end }

@ -291,9 +324,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// client should ignore SortOrder if the target is SortByKey.
// Since by default the server returns results sorted by keys
// in lexicographically ascending order, the client should ignore
// SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}

@ -434,7 +467,7 @@ func WithPrevKV() OpOption {
}

// WithIgnoreValue updates the key using its current value.
// Empty value should be passed when ignore_value is set.
// This option can not be combined with non-empty values.
// Returns an error if the key does not exist.
func WithIgnoreValue() OpOption {
return func(op *Op) {

@ -443,7 +476,7 @@ func WithIgnoreValue() OpOption {
}

// WithIgnoreLease updates the key using its current lease.
// Empty lease should be passed when ignore_lease is set.
// This option can not be combined with WithLease.
// Returns an error if the key does not exist.
func WithIgnoreLease() OpOption {
return func(op *Op) {

@ -468,8 +501,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) {
}
}

// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
func WithAttachedKeys() LeaseOption {
return func(op *LeaseOp) { op.attachedKeys = true }
}
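
These option setters compose onto an Op. A short sketch combining a few of them, reusing cli from the earlier sketches (keys are illustrative):

// read a whole prefix, newest first, keys only
gresp, err := cli.Get(context.TODO(), "config/",
    clientv3.WithPrefix(),
    clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
    clientv3.WithKeysOnly())
if err != nil {
    panic(err)
}
fmt.Println("keys under config/:", len(gresp.Kvs))

// list the keys attached to a lease via WithAttachedKeys above
lresp, err := cli.Grant(context.TODO(), 5)
if err != nil {
    panic(err)
}
ttl, err := cli.TimeToLive(context.TODO(), lresp.ID, clientv3.WithAttachedKeys())
if err != nil {
    panic(err)
}
fmt.Println("attached keys:", len(ttl.Keys))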

@ -1,4 +1,4 @@
// Copyright 2013-2015 CoreOS, Inc.
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -12,27 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package semver
package clientv3

import (
"sort"
)
import "context"

type Versions []*Version

func (s Versions) Len() int {
return len(s)
}

func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}

func (s Versions) Less(i, j int) bool {
return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
sort.Sort(Versions(versions))
// TODO: remove this when "FailFast=false" is fixed.
// See https://github.com/grpc/grpc-go/issues/1532.
func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
select {
case <-ready:
return nil
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-clientCtx.Done():
return clientCtx.Err()
}
}
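
readyWait blocks a retried RPC until the balancer reports a connection, while honoring both the RPC's and the client's contexts. A hypothetical caller, with ready standing in for the balancer's ConnectNotify channel:

// hypothetical helper, not part of this commit
func doWithReady(rpcCtx, clientCtx context.Context, ready <-chan struct{}, f func(context.Context) error) error {
    // block until a connection is pinned, or until either context ends
    if err := readyWait(rpcCtx, clientCtx, ready); err != nil {
        return err
    }
    return f(rpcCtx)
}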

@ -15,279 +15,482 @@
package clientv3

import (
"context"

"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"

"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

type retryPolicy uint8

const (
repeatable retryPolicy = iota
nonRepeatable
)

type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error
type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
type retryStopErrFunc func(error) bool

func (c *Client) newRetryWrapper() retryRpcFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
// immutable requests (e.g. Get) should be retried unless it's
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
//
// "isRepeatableStopError" returns "true" when an immutable request
// is interrupted by server-side or gRPC-side error and its status
// code is not transient (!= codes.Unavailable).
//
// Returning "true" means retry should stop, since client cannot
// handle itself even with retries.
func isRepeatableStopError(err error) bool {
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
return true
}
// only retry if unavailable
ev, _ := status.FromError(err)
return ev.Code() != codes.Unavailable
}

// mutable requests (e.g. Put, Delete, Txn) should only be retried
// when the status code is codes.Unavailable when initial connection
// has not been established (no pinned endpoint).
//
// "isNonRepeatableStopError" returns "true" when a mutable request
// is interrupted by non-transient error that client cannot handle itself,
// or transient error while the connection has already been established
// (pinned endpoint exists).
//
// Returning "true" means retry should stop, otherwise it violates
// write-at-most-once semantics.
func isNonRepeatableStopError(err error) bool {
ev, _ := status.FromError(err)
if ev.Code() != codes.Unavailable {
return true
}
desc := rpctypes.ErrorDesc(err)
return desc != "there is no address available" && desc != "there is no connection available"
}
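
Both stop predicates above reduce to inspecting the gRPC status code; only codes.Unavailable is treated as transient. A small stand-alone sketch of the same classification (the helper name isTransient is ours, not from this commit):

import (
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// isTransient reports whether err carries the one status code the
// retry wrappers above consider retryable.
func isTransient(err error) bool {
    s, ok := status.FromError(err)
    return ok && s.Code() == codes.Unavailable
}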

func (c *Client) newRetryWrapper() retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
var isStop retryStopErrFunc
switch rp {
case repeatable:
isStop = isRepeatableStopError
case nonRepeatable:
isStop = isNonRepeatableStopError
}
for {
if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
return err
}
pinned := c.balancer.pinned()
err := f(rpcCtx)
if err == nil {
return nil
}
logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)

eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if _, ok := eErr.(rpctypes.EtcdError); ok {
return err
if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
// mark this before endpoint switch is triggered
c.balancer.hostPortError(pinned, err)
c.balancer.next()
logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
}

// only retry if unavailable
if grpc.Code(err) != codes.Unavailable {
if isStop(err) {
return err
}

select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
return rpcCtx.Err()
case <-c.ctx.Done():
return c.ctx.Err()
}
}
}
}

func (c *Client) newAuthRetryWrapper() retryRpcFunc {
return func(rpcCtx context.Context, f rpcFunc) error {
func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
for {
err := f(rpcCtx)
pinned := c.balancer.pinned()
err := retryf(rpcCtx, f, rp)
if err == nil {
return nil
}

logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
// always stop retry on etcd errors other than invalid auth token
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(rpcCtx)
if gterr != nil {
logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
return err // return the original error for simplicity
}
continue
}

return err
}
}
}

// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
}

type retryKVClient struct {
*retryWriteKVClient
kc pb.KVClient
retryf retryRPCFunc
}

// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{
kc: pb.NewKVClient(c.conn),
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
}
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
resp, err = rkv.kc.Range(rctx, in, opts...)
return err
})
}, repeatable)
return resp, err
}

type retryWriteKVClient struct {
pb.KVClient
retryf retryRpcFunc
}

func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...)
resp, err = rkv.kc.Put(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
// TODO: "repeatable" for read-only txn
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
resp, err = rkv.kc.Txn(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
resp, err = rkv.kc.Compact(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

type retryLeaseClient struct {
pb.LeaseClient
retryf retryRpcFunc
lc pb.LeaseClient
retryf retryRPCFunc
}

// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
return &retryLeaseClient{retry, c.retryAuthWrapper}
return &retryLeaseClient{
lc: pb.NewLeaseClient(c.conn),
retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
}
}

func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
return err
})
}, repeatable)
return resp, err

}

func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
return err
})
}, repeatable)
return resp, err
}

type retryClusterClient struct {
pb.ClusterClient
retryf retryRpcFunc
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
err = rlc.retryf(ctx, func(rctx context.Context) error {
stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
return err
}, repeatable)
return stream, err
}

// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
type retryClusterClient struct {
cc pb.ClusterClient
retryf retryRPCFunc
}

// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
return &retryClusterClient{
cc: pb.NewClusterClient(c.conn),
retryf: c.newRetryWrapper(),
}
}

func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.cc.MemberList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
err = rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

type retryMaintenanceClient struct {
mc pb.MaintenanceClient
retryf retryRPCFunc
}

// RetryMaintenanceClient implements a Maintenance.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
return &retryMaintenanceClient{
mc: pb.NewMaintenanceClient(conn),
retryf: c.newRetryWrapper(),
}
}

func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Alarm(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Status(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Hash(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.HashKV(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
stream, err = rmc.mc.Snapshot(rctx, in, opts...)
return err
}, repeatable)
return stream, err
}

func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
err = rmc.retryf(ctx, func(rctx context.Context) error {
resp, err = rmc.mc.Defragment(rctx, in, opts...)
return err
}, nonRepeatable)
return resp, err
}

type retryAuthClient struct {
pb.AuthClient
retryf retryRpcFunc
ac pb.AuthClient
retryf retryRPCFunc
}

// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
// RetryAuthClient implements a AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
return &retryAuthClient{
ac: pb.NewAuthClient(c.conn),
retryf: c.newRetryWrapper(),
}
}

func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.UserGet(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleGet(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.RoleList(rctx, in, opts...)
return err
}, repeatable)
return resp, err
}

func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
resp, err = rac.ac.AuthEnable(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
resp, err = rac.ac.AuthDisable(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
resp, err = rac.ac.UserAdd(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
resp, err = rac.ac.UserDelete(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
resp, err = rac.ac.RoleAdd(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
resp, err = rac.ac.RoleDelete(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
return err
})
}, nonRepeatable)
return resp, err
}

func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
err = rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.ac.Authenticate(rctx, in, opts...)
return err
}, nonRepeatable)
return resp, err
}

@ -15,16 +15,17 @@
package clientv3

import (
"context"
"sync"

pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"

"google.golang.org/grpc"
)

// Txn is the interface that wraps mini-transactions.
//
// Tx.If(
// Txn(context.TODO()).If(
//  Compare(Value(k1), ">", v1),
//  Compare(Version(k1), "=", 2)
// ).Then(

@ -66,6 +67,8 @@ type txn struct {

sus []*pb.RequestOp
fas []*pb.RequestOp

callOpts []grpc.CallOption
}

func (txn *txn) If(cs ...Cmp) Txn {

@ -135,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn {
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
for {
resp, err := txn.commit()
if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}

func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}

var opts []grpc.CallOption
if !txn.isWrite {
opts = []grpc.CallOption{grpc.FailFast(false)}
}
resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
var resp *pb.TxnResponse
var err error
resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
if err != nil {
return nil, err
return nil, toErr(txn.ctx, err)
}
return (*TxnResponse)(resp), nil
}
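
Commit now sends the transaction once with the client's call options and maps failures through toErr; the retry loop moved into the retrying KV client. Usage follows the Txn interface comment near the top of this file; a minimal sketch with cli as before:

// create sample_key only if it does not exist yet
txnresp, err := cli.Txn(context.TODO()).
    If(clientv3.Compare(clientv3.Version("sample_key"), "=", 0)).
    Then(clientv3.OpPut("sample_key", "created")).
    Else(clientv3.OpGet("sample_key")).
    Commit()
if err != nil {
    panic(err)
}
fmt.Println("committed:", txnresp.Succeeded)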
|
||||
|
|
|
|||
|
|
@@ -15,6 +15,7 @@
 package clientv3
 
 import (
+	"context"
 	"fmt"
 	"sync"
 	"time"

@@ -22,9 +23,11 @@ import (
 	v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-	"golang.org/x/net/context"
 
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
 )
 
 const (

@@ -89,7 +92,7 @@ func (wr *WatchResponse) Err() error {
 		return v3rpc.ErrCompacted
 	case wr.Canceled:
 		if len(wr.cancelReason) != 0 {
-			return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
 		}
 		return v3rpc.ErrFutureRev
 	}

@@ -103,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool {
 
 // watcher implements the Watcher interface
 type watcher struct {
-	remote pb.WatchClient
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
 
 	// mu protects the grpc streams map
 	mu sync.RWMutex

@@ -114,8 +118,9 @@ type watcher struct {
 // watchGrpcStream tracks all watch resources attached to a single grpc stream.
 type watchGrpcStream struct {
-	owner  *watcher
-	remote pb.WatchClient
+	owner    *watcher
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
 
 	// ctx controls internal remote.Watch requests
 	ctx context.Context

@@ -134,7 +139,7 @@ type watchGrpcStream struct {
 	respc chan *pb.WatchResponse
 	// donec closes to broadcast shutdown
 	donec chan struct{}
-	// errc transmits errors from grpc Recv to the watch stream reconn logic
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
 	errc chan error
 	// closingc gets the watcherStream of closing watchers
 	closingc chan *watcherStream

@@ -186,14 +191,18 @@ type watcherStream struct {
 }
 
 func NewWatcher(c *Client) Watcher {
-	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
 }
 
-func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
-	return &watcher{
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+	w := &watcher{
 		remote:  wc,
 		streams: make(map[string]*watchGrpcStream),
 	}
+	if c != nil {
+		w.callOpts = c.callOpts
+	}
+	return w
 }
 
 // never closes
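A short usage sketch of the changed constructor: callers that build a Watcher from a raw gRPC connection may still pass a nil *Client, which simply leaves callOpts empty. The conn variable is assumed to be an existing *grpc.ClientConn:

    wc := pb.NewWatchClient(conn)
    w := clientv3.NewWatchFromWatchClient(wc, nil) // nil client => default call options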
@@ -212,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
 	wgs := &watchGrpcStream{
-		owner:      w,
-		remote:     w.remote,
+		owner:      w,
+		remote:     w.remote,
+		callOpts:   w.callOpts,
 		ctx:        ctx,
-		ctxKey:     fmt.Sprintf("%v", inctx),
+		ctxKey:     streamKeyFromCtx(inctx),
 		cancel:     cancel,
 		substreams: make(map[int64]*watcherStream),
-		respc:    make(chan *pb.WatchResponse),
-		reqc:     make(chan *watchRequest),
-		donec:    make(chan struct{}),
-		errc:     make(chan error, 1),
-		closingc: make(chan *watcherStream),
-		resumec:  make(chan struct{}),
+		respc:      make(chan *pb.WatchResponse),
+		reqc:       make(chan *watchRequest),
+		donec:      make(chan struct{}),
+		errc:       make(chan error, 1),
+		closingc:   make(chan *watcherStream),
+		resumec:    make(chan struct{}),
 	}
 	go wgs.run()
 	return wgs

@@ -253,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
 	}
 
 	ok := false
-	ctxKey := fmt.Sprintf("%v", ctx)
+	ctxKey := streamKeyFromCtx(ctx)
 
 	// find or allocate appropriate grpc watch stream
 	w.mu.Lock()

@@ -434,7 +443,7 @@ func (w *watchGrpcStream) run() {
 				initReq: *wreq,
 				id:      -1,
 				outc:    outc,
-				// unbufffered so resumes won't cause repeat events
+				// unbuffered so resumes won't cause repeat events
 				recvc: make(chan *WatchResponse),
 			}

@@ -461,7 +470,7 @@ func (w *watchGrpcStream) run() {
 				if ws := w.nextResume(); ws != nil {
 					wc.Send(ws.initReq.toPB())
 				}
-			case pbresp.Canceled:
+			case pbresp.Canceled && pbresp.CompactRevision == 0:
 				delete(cancelSet, pbresp.WatchId)
 				if ws, ok := w.substreams[pbresp.WatchId]; ok {
 					// signal to stream goroutine to update closingc

@@ -486,7 +495,7 @@ func (w *watchGrpcStream) run() {
 				req := &pb.WatchRequest{RequestUnion: cr}
 				wc.Send(req)
 			}
-		// watch client failed to recv; spawn another if possible
+		// watch client failed on Recv; spawn another if possible
 		case err := <-w.errc:
 			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
 				closeErr = err

@@ -748,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
 	return donec
 }
 
-// joinSubstream waits for all substream goroutines to complete
+// joinSubstreams waits for all substream goroutines to complete.
 func (w *watchGrpcStream) joinSubstreams() {
 	for _, ws := range w.substreams {
 		<-ws.donec

@@ -760,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() {
 		}
 	}
 }
 
-// openWatchClient retries opening a watchclient until retryConnection fails
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
 func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
 	for {
 		select {

@@ -771,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
 			return nil, err
 		default:
 		}
-		if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
+		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
 			break
 		}
 		if isHaltErr(w.ctx, err) {

@@ -781,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
 	return ws, nil
 }
 
-// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
 func (wr *watchRequest) toPB() *pb.WatchRequest {
 	req := &pb.WatchCreateRequest{
 		StartRevision: wr.rev,

@@ -794,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
 	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
 	return &pb.WatchRequest{RequestUnion: cr}
 }
+
+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", md)
+	}
+	return ""
+}
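A quick sketch of what the new streamKeyFromCtx buys: watch contexts carrying different outgoing gRPC metadata now map to different keys in the watcher's streams map, so they no longer share one grpc watch stream. The cli value and metadata keys below are hypothetical:

    ctx1 := metadata.AppendToOutgoingContext(context.Background(), "team", "a")
    ctx2 := metadata.AppendToOutgoingContext(context.Background(), "team", "b")
    wch1 := cli.Watch(ctx1, "foo") // stream keyed on metadata {team: a}
    wch2 := cli.Watch(ctx2, "foo") // separate stream keyed on {team: b}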
@@ -37,6 +37,7 @@ var (
 		"3.0.0": {AuthCapability: true, V3rpcCapability: true},
 		"3.1.0": {AuthCapability: true, V3rpcCapability: true},
 		"3.2.0": {AuthCapability: true, V3rpcCapability: true},
+		"3.3.0": {AuthCapability: true, V3rpcCapability: true},
 	}
 
 	enableMapMu sync.RWMutex

@@ -33,9 +33,6 @@ type Cluster interface {
 	// Member retrieves a particular member based on ID, or nil if the
 	// member does not exist in the cluster
 	Member(id types.ID) *membership.Member
-	// IsIDRemoved checks whether the given ID has been removed from this
-	// cluster at some point in the past
-	IsIDRemoved(id types.ID) bool
 	// Version is the cluster-wide minimum major.minor version.
 	Version() *semver.Version
 }

@@ -15,9 +15,10 @@
 package v3rpc
 
 import (
+	"context"
+
 	"github.com/coreos/etcd/etcdserver"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"golang.org/x/net/context"
 )
 
 type AuthServer struct {
@@ -16,10 +16,15 @@ package v3rpc
 
 import (
 	"crypto/tls"
+	"io/ioutil"
+	"math"
+	"os"
+	"sync"
 
 	"github.com/coreos/etcd/etcdserver"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+
 	"github.com/grpc-ecosystem/go-grpc-prometheus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"

@@ -27,13 +32,16 @@ import (
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
 )
 
-const grpcOverheadBytes = 512 * 1024
+const (
+	grpcOverheadBytes = 512 * 1024
+	maxStreams        = math.MaxUint32
+	maxSendBytes      = math.MaxInt32
+)
 
-func init() {
-	grpclog.SetLogger(plog)
-}
+// integration tests call this multiple times, which is racey in gRPC side
+var grpclogOnce sync.Once
 
-func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
 	var opts []grpc.ServerOption
 	opts = append(opts, grpc.CustomCodec(&codec{}))
 	if tls != nil {

@@ -41,8 +49,10 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
 	}
 	opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
 	opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
-	opts = append(opts, grpc.MaxMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
-	grpcServer := grpc.NewServer(opts...)
+	opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
+	opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+	grpcServer := grpc.NewServer(append(opts, gopts...)...)
 
 	pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
 	pb.RegisterWatchServer(grpcServer, NewWatchServer(s))

@@ -58,5 +68,19 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
 	hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
 	healthpb.RegisterHealthServer(grpcServer, hsrv)
 
+	// set zero values for metrics registered for this grpc server
+	grpc_prometheus.Register(grpcServer)
+
+	grpclogOnce.Do(func() {
+		if s.Cfg.Debug {
+			grpc.EnableTracing = true
+			// enable info, warning, error
+			grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
+		} else {
+			// only discard info
+			grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+		}
+	})
+
 	return grpcServer
 }
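With the new variadic gopts parameter, embedding callers can stack their own gRPC server options on top of etcd's defaults. A hedged sketch using grpc-go's keepalive package (the idle value is hypothetical):

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/keepalive"
    )

    srv := v3rpc.Server(etcdSrv, tlsConfig,
        grpc.KeepaliveParams(keepalive.ServerParameters{
            MaxConnectionIdle: 5 * time.Minute, // drop idle client conns
        }),
    )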
@@ -15,6 +15,7 @@
 package v3rpc
 
 import (
+	"context"
 	"sync"
 	"time"

@@ -25,7 +26,6 @@ import (
 	"github.com/coreos/etcd/raft"
 
 	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 )

@@ -16,12 +16,14 @@
 package v3rpc
 
 import (
+	"context"
+
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/adt"
+
 	"github.com/coreos/pkg/capnslog"
-	"golang.org/x/net/context"
 )
 
 var (
@@ -15,13 +15,13 @@
 package v3rpc
 
 import (
+	"context"
 	"io"
 
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/lease"
-	"golang.org/x/net/context"
 )
 
 type LeaseServer struct {

@@ -68,6 +68,21 @@ func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLi
 	return resp, nil
 }
 
+func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+	resp, err := ls.le.LeaseLeases(ctx, rr)
+	if err != nil && err != lease.ErrLeaseNotFound {
+		return nil, togRPCError(err)
+	}
+	if err == lease.ErrLeaseNotFound {
+		resp = &pb.LeaseLeasesResponse{
+			Header: &pb.ResponseHeader{},
+			Leases: []*pb.LeaseStatus{},
+		}
+	}
+	ls.hdr.fill(resp.Header)
+	return resp, nil
+}
+
 func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
 	errc := make(chan error, 1)
 	go func() {

@@ -92,6 +107,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
 			return nil
 		}
 		if err != nil {
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			}
 			return err
 		}

@@ -117,6 +137,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
 		resp.TTL = ttl
 		err = stream.Send(resp)
 		if err != nil {
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			}
 			return err
 		}
 	}
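On the client side, the new LeaseLeases RPC surfaces through clientv3's Lease API as Leases (etcd 3.3+). A hedged sketch, with cli assumed to be an existing *clientv3.Client:

    resp, err := cli.Leases(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    for _, l := range resp.Leases {
        fmt.Printf("active lease: %016x\n", l.ID)
    }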
@@ -15,17 +15,18 @@
 package v3rpc
 
 import (
+	"context"
 	"crypto/sha256"
 	"io"
 
 	"github.com/coreos/etcd/auth"
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/mvcc"
 	"github.com/coreos/etcd/mvcc/backend"
 	"github.com/coreos/etcd/pkg/types"
 	"github.com/coreos/etcd/version"
-	"golang.org/x/net/context"
 )
 
 type KVGetter interface {

@@ -40,9 +41,13 @@ type Alarmer interface {
 	Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
 }
 
+type LeaderTransferrer interface {
+	MoveLeader(ctx context.Context, lead, target uint64) error
+}
+
 type RaftStatusGetter interface {
-	Index() uint64
-	Term() uint64
+	etcdserver.RaftTimer
 	ID() types.ID
 	Leader() types.ID
 }

@@ -56,11 +61,12 @@ type maintenanceServer struct {
 	kg  KVGetter
 	bg  BackendGetter
 	a   Alarmer
+	lt  LeaderTransferrer
 	hdr header
 }
 
 func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
-	srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
+	srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
 	return &authMaintenanceServer{srv, s}
 }

@@ -130,6 +136,17 @@ func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.H
 	return resp, nil
 }
 
+func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+	h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
+	if err != nil {
+		return nil, togRPCError(err)
+	}
+
+	resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
+	ms.hdr.fill(resp.Header)
+	return resp, nil
+}
+
 func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
 	return ms.a.Alarm(ctx, ar)
 }

@@ -147,6 +164,17 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (
 	return resp, nil
 }
 
+func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+	if ms.rg.ID() != ms.rg.Leader() {
+		return nil, rpctypes.ErrGRPCNotLeader
+	}
+
+	if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
+		return nil, togRPCError(err)
+	}
+	return &pb.MoveLeaderResponse{}, nil
+}
+
 type authMaintenanceServer struct {
 	*maintenanceServer
 	ag AuthGetter

@@ -185,6 +213,17 @@ func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (
 	return ams.maintenanceServer.Hash(ctx, r)
 }
 
+func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+	if err := ams.isAuthenticated(ctx); err != nil {
+		return nil, err
+	}
+	return ams.maintenanceServer.HashKV(ctx, r)
+}
+
 func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
 	return ams.maintenanceServer.Status(ctx, ar)
 }
 
+func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+	return ams.maintenanceServer.MoveLeader(ctx, tr)
+}
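The new MoveLeader handler backs clientv3's Maintenance API of the same name (etcd 3.3+). A sketch, where cli is assumed to be a *clientv3.Client connected to the current leader and the transferee member ID is hypothetical:

    // Transfer leadership to member 0x8211f1d0f64f3269; the call fails
    // with ErrGRPCNotLeader when the receiving endpoint is not the leader.
    _, err := cli.MoveLeader(context.TODO(), 0x8211f1d0f64f3269)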
@@ -15,6 +15,7 @@
 package v3rpc
 
 import (
+	"context"
 	"time"
 
 	"github.com/coreos/etcd/etcdserver"

@@ -23,20 +24,17 @@ import (
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/pkg/types"
-	"golang.org/x/net/context"
 )
 
 type ClusterServer struct {
-	cluster   api.Cluster
-	server    etcdserver.Server
-	raftTimer etcdserver.RaftTimer
+	cluster api.Cluster
+	server  etcdserver.ServerV3
 }
 
-func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
+func NewClusterServer(s etcdserver.ServerV3) *ClusterServer {
 	return &ClusterServer{
-		cluster:   s.Cluster(),
-		server:    s,
-		raftTimer: s,
+		cluster: s.Cluster(),
+		server:  s,
 	}
 }

@@ -86,7 +84,7 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest
 }
 
 func (cs *ClusterServer) header() *pb.ResponseHeader {
-	return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
+	return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
 }
 
 func membersToProtoMembers(membs []*membership.Member) []*pb.Member {

@@ -15,11 +15,12 @@
 package v3rpc
 
 import (
+	"context"
+
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/pkg/types"
-	"golang.org/x/net/context"
 )
 
 type quotaKVServer struct {
@@ -15,106 +15,112 @@
 package rpctypes
 
 import (
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
-// server-side error
 var (
+	// server-side error
-	ErrGRPCEmptyKey      = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
-	ErrGRPCKeyNotFound   = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found")
-	ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided")
-	ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided")
-	ErrGRPCTooManyOps    = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
-	ErrGRPCDuplicateKey  = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
-	ErrGRPCCompacted     = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
-	ErrGRPCFutureRev     = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
-	ErrGRPCNoSpace       = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
+	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
+	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
+	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
+	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
+	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
+	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
+	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
+	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
+	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
 
-	ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
-	ErrGRPCLeaseExist    = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
+	ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
+	ErrGRPCLeaseExist    = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
 
-	ErrGRPCMemberExist            = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
-	ErrGRPCPeerURLExist           = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
-	ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
-	ErrGRPCMemberBadURLs          = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
-	ErrGRPCMemberNotFound         = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
+	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
+	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
+	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
+	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
+	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
 
-	ErrGRPCRequestTooLarge        = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
-	ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
+	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
+	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
 
-	ErrGRPCRootUserNotExist     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
-	ErrGRPCRootRoleNotExist     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
-	ErrGRPCUserAlreadyExist     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
-	ErrGRPCUserEmpty            = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty")
-	ErrGRPCUserNotFound         = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
-	ErrGRPCRoleAlreadyExist     = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
-	ErrGRPCRoleNotFound         = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
-	ErrGRPCAuthFailed           = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
-	ErrGRPCPermissionDenied     = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
-	ErrGRPCRoleNotGranted       = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
-	ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
-	ErrGRPCAuthNotEnabled       = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
-	ErrGRPCInvalidAuthToken     = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
-	ErrGRPCInvalidAuthMgmt      = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management")
+	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
+	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
+	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
+	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
+	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
+	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
+	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
+	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
+	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
+	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
+	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
+	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
+	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
+	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
 
-	ErrGRPCNoLeader                   = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
-	ErrGRPCNotCapable                 = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
-	ErrGRPCStopped                    = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
-	ErrGRPCTimeout                    = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out")
-	ErrGRPCTimeoutDueToLeaderFail     = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
-	ErrGRPCTimeoutDueToConnectionLost = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
-	ErrGRPCUnhealthy                  = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster")
+	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
+	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
+	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
+	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
+	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
+	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
+	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
+	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
 
 	errStringToError = map[string]error{
-		grpc.ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
-		grpc.ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
-		grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
-		grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
+		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
+		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
 
-		grpc.ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
-		grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
-		grpc.ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
-		grpc.ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
-		grpc.ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
+		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
 
-		grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
-		grpc.ErrorDesc(ErrGRPCLeaseExist):    ErrGRPCLeaseExist,
+		ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
+		ErrorDesc(ErrGRPCLeaseExist):    ErrGRPCLeaseExist,
 
-		grpc.ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
-		grpc.ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
-		grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
-		grpc.ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
-		grpc.ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
+		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
 
-		grpc.ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
-		grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
 
-		grpc.ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
-		grpc.ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
-		grpc.ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
-		grpc.ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
-		grpc.ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
-		grpc.ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
-		grpc.ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
-		grpc.ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
-		grpc.ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
-		grpc.ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
-		grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
-		grpc.ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
-		grpc.ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
-		grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
+		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
+		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
+		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
+		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
+		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
+		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
+		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
+		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
+		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
+		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
+		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
+		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
 
-		grpc.ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
-		grpc.ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
-		grpc.ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
-		grpc.ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
-		grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
-		grpc.ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
-		grpc.ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
+		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
+		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
+		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
+		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
+		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
+		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
 	}
 )
 
 // client-side error
 var (
 	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
 	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
 	ErrValueProvided = Error(ErrGRPCValueProvided)

@@ -153,12 +159,14 @@ var (
 	ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
 
 	ErrNoLeader                   = Error(ErrGRPCNoLeader)
+	ErrNotLeader                  = Error(ErrGRPCNotLeader)
 	ErrNotCapable                 = Error(ErrGRPCNotCapable)
 	ErrStopped                    = Error(ErrGRPCStopped)
 	ErrTimeout                    = Error(ErrGRPCTimeout)
 	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
 	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
 	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
+	ErrCorrupt                    = Error(ErrGRPCCorrupt)
 )
 
 // EtcdError defines gRPC server errors.

@@ -182,9 +190,23 @@ func Error(err error) error {
 	if err == nil {
 		return nil
 	}
-	verr, ok := errStringToError[grpc.ErrorDesc(err)]
+	verr, ok := errStringToError[ErrorDesc(err)]
 	if !ok { // not gRPC error
 		return err
 	}
-	return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
+	ev, ok := status.FromError(verr)
+	var desc string
+	if ok {
+		desc = ev.Message()
+	} else {
+		desc = verr.Error()
+	}
+	return EtcdError{code: ev.Code(), desc: desc}
 }
+
+func ErrorDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
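Usage sketch for the conversion above: client code turns a raw gRPC error into the typed client-side value and compares it against the sentinels, including the new ErrNotLeader and ErrCorrupt. The err value is assumed to come from some etcd RPC call:

    switch rpctypes.Error(err) {
    case rpctypes.ErrNotLeader:
        // retry the maintenance call against the leader endpoint
    case rpctypes.ErrCorrupt:
        // the cluster raised the CORRUPT alarm; stop issuing writes
    }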
@@ -15,14 +15,17 @@
 package v3rpc
 
 import (
+	"context"
+
 	"github.com/coreos/etcd/auth"
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/mvcc"
-	"google.golang.org/grpc"
+
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 var toGRPCErrorMap = map[error]error{

@@ -39,12 +42,14 @@ var toGRPCErrorMap = map[error]error{
 	etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
 
 	etcdserver.ErrNoLeader:                   rpctypes.ErrGRPCNoLeader,
+	etcdserver.ErrNotLeader:                  rpctypes.ErrGRPCNotLeader,
 	etcdserver.ErrStopped:                    rpctypes.ErrGRPCStopped,
 	etcdserver.ErrTimeout:                    rpctypes.ErrGRPCTimeout,
 	etcdserver.ErrTimeoutDueToLeaderFail:     rpctypes.ErrGRPCTimeoutDueToLeaderFail,
 	etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
 	etcdserver.ErrUnhealthy:                  rpctypes.ErrGRPCUnhealthy,
 	etcdserver.ErrKeyNotFound:                rpctypes.ErrGRPCKeyNotFound,
+	etcdserver.ErrCorrupt:                    rpctypes.ErrGRPCCorrupt,
 
 	lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
 	lease.ErrLeaseExists:   rpctypes.ErrGRPCLeaseExist,

@@ -66,9 +71,26 @@ var toGRPCErrorMap = map[error]error{
 }
 
 func togRPCError(err error) error {
+	// let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
+	if err == context.Canceled || err == context.DeadlineExceeded {
+		return err
+	}
 	grpcErr, ok := toGRPCErrorMap[err]
 	if !ok {
-		return grpc.Errorf(codes.Unknown, err.Error())
+		return status.Error(codes.Unknown, err.Error())
 	}
 	return grpcErr
 }
+
+func isClientCtxErr(ctxErr error, err error) bool {
+	if ctxErr != nil {
+		return true
+	}
+
+	ev, ok := status.FromError(err)
+	if !ok {
+		return false
+	}
+	code := ev.Code()
+	return code == codes.Canceled || code == codes.DeadlineExceeded
+}
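A sketch of how isClientCtxErr splits log levels in the handlers below: an error paired with a cancelled client context is demoted to Debug, while a genuine transport failure stays at Warning. Package-internal code only, since the helper is unexported; values are illustrative:

    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    clientGone := status.Error(codes.Canceled, "context canceled")
    fmt.Println(isClientCtxErr(ctx.Err(), clientGone))    // true  => Debug-level log
    fmt.Println(isClientCtxErr(nil, io.ErrUnexpectedEOF)) // false => Warning-level log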
@@ -15,12 +15,11 @@
 package v3rpc
 
 import (
+	"context"
 	"io"
 	"sync"
 	"time"
 
-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/auth"
 	"github.com/coreos/etcd/etcdserver"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"

@@ -141,6 +140,11 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
 	// deadlock when calling sws.close().
 	go func() {
 		if rerr := sws.recvLoop(); rerr != nil {
+			if isClientCtxErr(stream.Context().Err(), rerr) {
+				plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			} else {
+				plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			}
 			errc <- rerr
 		}
 	}()

@@ -321,11 +325,13 @@ func (sws *serverWatchStream) sendLoop() {
 				}
 			}
 
+			canceled := wresp.CompactRevision != 0
 			wr := &pb.WatchResponse{
 				Header:          sws.newResponseHeader(wresp.Revision),
 				WatchId:         int64(wresp.WatchID),
 				Events:          events,
 				CompactRevision: wresp.CompactRevision,
+				Canceled:        canceled,
 			}
 
 			if _, hasId := ids[wresp.WatchID]; !hasId {

@@ -337,6 +343,11 @@ func (sws *serverWatchStream) sendLoop() {
 
 			mvcc.ReportEventReceived(len(evs))
 			if err := sws.gRPCStream.Send(wr); err != nil {
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
+				}
 				return
 			}

@@ -353,6 +364,11 @@ func (sws *serverWatchStream) sendLoop() {
 			}
 
 			if err := sws.gRPCStream.Send(c); err != nil {
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				}
 				return
 			}

@@ -368,6 +384,11 @@ func (sws *serverWatchStream) sendLoop() {
 			for _, v := range pending[wid] {
 				mvcc.ReportEventReceived(len(v.Events))
 				if err := sws.gRPCStream.Send(v); err != nil {
+					if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+						plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+					} else {
+						plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+					}
 					return
 				}
 			}
@@ -16,16 +16,18 @@ package etcdserver
 
 import (
 	"bytes"
+	"context"
 	"sort"
 	"time"
 
 	"github.com/coreos/etcd/auth"
 	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/lease"
 	"github.com/coreos/etcd/mvcc"
 	"github.com/coreos/etcd/mvcc/mvccpb"
 	"github.com/coreos/etcd/pkg/types"
+
 	"github.com/gogo/protobuf/proto"
-	"golang.org/x/net/context"
 )
 
 const (

@@ -223,8 +225,9 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ
 			return nil, err
 		}
 		if rr != nil {
+			resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
 			for i := range rr.KVs {
-				resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
+				resp.PrevKvs[i] = &rr.KVs[i]
 			}
 		}
 	}

@@ -318,11 +321,12 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang
 
 	resp.Header.Revision = rr.Rev
 	resp.Count = int64(rr.Count)
+	resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
 	for i := range rr.KVs {
 		if r.KeysOnly {
 			rr.KVs[i].Value = nil
 		}
-		resp.Kvs = append(resp.Kvs, &rr.KVs[i])
+		resp.Kvs[i] = &rr.KVs[i]
 	}
 	return resp, nil
 }

@@ -423,7 +427,7 @@ func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
 // applyCompare applies the compare request.
 // If the comparison succeeds, it returns true. Otherwise, returns false.
 func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
-	// TOOD: possible optimizations
+	// TODO: possible optimizations
 	// * chunk reads for large ranges to conserve memory
 	// * rewrite rules for common patterns:
 	//	ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"

@@ -473,6 +477,11 @@ func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
 			rev = tv.Version
 		}
 		result = compareInt64(ckv.Version, rev)
+	case pb.Compare_LEASE:
+		if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
+			rev = tv.Lease
+		}
+		result = compareInt64(ckv.Lease, rev)
 	}
 	switch c.Result {
 	case pb.Compare_EQUAL:

@@ -572,9 +581,11 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
 			break
 		}
 
+		plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
 		switch m.Alarm {
+		case pb.AlarmType_CORRUPT:
+			a.s.applyV3 = newApplierV3Corrupt(a)
 		case pb.AlarmType_NOSPACE:
-			plog.Warningf("alarm raised %+v", m)
 			a.s.applyV3 = newApplierV3Capped(a)
 		default:
 			plog.Errorf("unimplemented alarm activation (%+v)", m)

@@ -591,7 +602,8 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
 		}
 
 		switch m.Alarm {
-		case pb.AlarmType_NOSPACE:
+		case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
+			// TODO: check kv hash before deactivating CORRUPT?
 			plog.Infof("alarm disarmed %+v", ar)
 			a.s.applyV3 = a.s.newApplierV3()
 		default:

@@ -641,7 +653,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
 }
 
 func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
-	ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
+	ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
 	resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
 	if resp != nil {
 		resp.Header = newHeader(a.s)
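The Range and DeleteRange changes above replace append-after-make with index assignment. The pattern in isolation, as a hedged helper (kvs is assumed to be a []mvccpb.KeyValue from an mvcc range read):

    // toPointers mirrors the sized-allocation pattern adopted above.
    func toPointers(kvs []mvccpb.KeyValue) []*mvccpb.KeyValue {
        out := make([]*mvccpb.KeyValue, len(kvs)) // one allocation, exact size
        for i := range kvs {
            out[i] = &kvs[i] // appending after make would grow the slice past len
        }
        return out
    }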
@@ -189,6 +189,28 @@ func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
 	return nil
 }
 
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+	err := aa.as.IsAdminPermitted(&aa.authInfo)
+	if err != nil && r.Name != aa.authInfo.Username {
+		aa.authInfo.Username = ""
+		aa.authInfo.Revision = 0
+		return &pb.AuthUserGetResponse{}, err
+	}
+
+	return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+	err := aa.as.IsAdminPermitted(&aa.authInfo)
+	if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
+		aa.authInfo.Username = ""
+		aa.authInfo.Revision = 0
+		return &pb.AuthRoleGetResponse{}, err
+	}
+
+	return aa.applierV3.RoleGet(r)
+}
+
 func needAdminPermission(r *pb.InternalRaftRequest) bool {
 	switch {
 	case r.AuthEnable != nil:

@@ -203,16 +225,12 @@ func needAdminPermission(r *pb.InternalRaftRequest) bool {
 		return true
 	case r.AuthUserGrantRole != nil:
 		return true
-	case r.AuthUserGet != nil:
-		return true
 	case r.AuthUserRevokeRole != nil:
 		return true
 	case r.AuthRoleAdd != nil:
 		return true
 	case r.AuthRoleGrantPermission != nil:
 		return true
-	case r.AuthRoleGet != nil:
-		return true
 	case r.AuthRoleRevokePermission != nil:
 		return true
 	case r.AuthRoleDelete != nil:
@@ -20,7 +20,6 @@ import (
 	"time"
 
 	"github.com/coreos/etcd/etcdserver/api"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
 	"github.com/coreos/etcd/etcdserver/membership"
 	"github.com/coreos/etcd/pkg/pbutil"
 	"github.com/coreos/etcd/store"

@@ -29,11 +28,11 @@ import (
 
 // ApplierV2 is the interface for processing V2 raft messages
 type ApplierV2 interface {
-	Delete(r *pb.Request) Response
-	Post(r *pb.Request) Response
-	Put(r *pb.Request) Response
-	QGet(r *pb.Request) Response
-	Sync(r *pb.Request) Response
+	Delete(r *RequestV2) Response
+	Post(r *RequestV2) Response
+	Put(r *RequestV2) Response
+	QGet(r *RequestV2) Response
+	Sync(r *RequestV2) Response
 }
 
 func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {

@@ -45,7 +44,7 @@ type applierV2store struct {
 	cluster *membership.RaftCluster
 }
 
-func (a *applierV2store) Delete(r *pb.Request) Response {
+func (a *applierV2store) Delete(r *RequestV2) Response {
 	switch {
 	case r.PrevIndex > 0 || r.PrevValue != "":
 		return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))

@@ -54,12 +53,12 @@ func (a *applierV2store) Delete(r *pb.Request) Response {
 	}
 }
 
-func (a *applierV2store) Post(r *pb.Request) Response {
-	return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
+func (a *applierV2store) Post(r *RequestV2) Response {
+	return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
 }
 
-func (a *applierV2store) Put(r *pb.Request) Response {
-	ttlOptions := toTTLOptions(r)
+func (a *applierV2store) Put(r *RequestV2) Response {
+	ttlOptions := r.TTLOptions()
 	exists, existsSet := pbutil.GetBool(r.PrevExist)
 	switch {
 	case existsSet:

@@ -96,19 +95,18 @@ func (a *applierV2store) Put(r *pb.Request) Response {
 	}
 }
 
-func (a *applierV2store) QGet(r *pb.Request) Response {
+func (a *applierV2store) QGet(r *RequestV2) Response {
 	return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
 }
 
-func (a *applierV2store) Sync(r *pb.Request) Response {
+func (a *applierV2store) Sync(r *RequestV2) Response {
 	a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
 	return Response{}
 }
 
 // applyV2Request interprets r as a call to store.X and returns a Response interpreted
 // from store.Event
-func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
-	toTTLOptions(r)
+func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
 	switch r.Method {
 	case "POST":
 		return s.applyV2.Post(r)

@@ -122,11 +120,11 @@ func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
 		return s.applyV2.Sync(r)
 	default:
 		// This should never be reached, but just in case:
-		return Response{err: ErrUnknownMethod}
+		return Response{Err: ErrUnknownMethod}
 	}
 }
 
-func toTTLOptions(r *pb.Request) store.TTLOptionSet {
+func (r *RequestV2) TTLOptions() store.TTLOptionSet {
 	refresh, _ := pbutil.GetBool(r.Refresh)
 	ttlOptions := store.TTLOptionSet{Refresh: refresh}
 	if r.Expiration != 0 {

@@ -136,5 +134,5 @@ func toTTLOptions(r *pb.Request) store.TTLOptionSet {
 }
 
 func toResponse(ev *store.Event, err error) Response {
-	return Response{Event: ev, err: err}
+	return Response{Event: ev, Err: err}
 }
@@ -15,14 +15,13 @@
 package etcdserver
 
 import (
+	"context"
 	"fmt"
 	"path/filepath"
 	"sort"
 	"strings"
 	"time"
 
-	"golang.org/x/net/context"
-
 	"github.com/coreos/etcd/pkg/netutil"
 	"github.com/coreos/etcd/pkg/transport"
 	"github.com/coreos/etcd/pkg/types"

@@ -52,7 +51,7 @@ type ServerConfig struct {
 	ElectionTicks    int
 	BootstrapTimeout time.Duration
 
-	AutoCompactionRetention int
+	AutoCompactionRetention time.Duration
+	AutoCompactionMode      string
 	QuotaBackendBytes       int64
 	MaxTxnOps               uint

@@ -66,6 +65,13 @@ type ServerConfig struct {
 	ClientCertAuthEnabled bool
 
 	AuthToken string
+
+	// InitialCorruptCheck is true to check data corruption on boot
+	// before serving any peer/client traffic.
+	InitialCorruptCheck bool
+	CorruptCheckTime    time.Duration
+
+	Debug bool
 }
 
 // VerifyBootstrap sanity-checks the initial config for bootstrap case

@@ -172,17 +178,16 @@ func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
 func (c *ServerConfig) ReqTimeout() time.Duration {
 	// 5s for queue waiting, computation and disk IO delay
 	// + 2 * election timeout for possible leader election
-	return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond
+	return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
 }
 
 func (c *ServerConfig) electionTimeout() time.Duration {
-	return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond
+	return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
 }
 
 func (c *ServerConfig) peerDialTimeout() time.Duration {
-	// 1s for queue wait and system delay
-	// + one RTT, which is smaller than 1/5 election timeout
-	return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5
+	// 1s for queue wait and election timeout
+	return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
 }
 
 func (c *ServerConfig) PrintWithInitial() { c.print(true) }
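A worked example of the timeout formulas above, with hypothetical settings TickMs=100 and ElectionTicks=10: the election timeout is 10 * 100ms = 1s, so ReqTimeout() is 5s + 2*1s = 7s and peerDialTimeout() is 1s + 1s = 2s.

    package main

    import (
        "fmt"

        "github.com/coreos/etcd/etcdserver"
    )

    func main() {
        cfg := etcdserver.ServerConfig{TickMs: 100, ElectionTicks: 10}
        fmt.Println(cfg.ReqTimeout()) // 7s = 5s + 2 * (10 * 100ms)
    }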
@ -0,0 +1,262 @@
|
|||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
)
|
||||
|
||||
// CheckInitialHashKV compares initial hash values with its peers
|
||||
// before serving any peer/client traffic. Only mismatch when hashes
|
||||
// are different at requested revision, with same compact revision.
|
||||
func (s *EtcdServer) CheckInitialHashKV() error {
|
||||
if !s.Cfg.InitialCorruptCheck {
|
||||
return nil
|
||||
}
|
||||
|
||||
plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
|
||||
h, rev, crev, err := s.kv.HashByRev(0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
|
||||
}
|
||||
peers := s.getPeerHashKVs(rev)
|
||||
mismatch := 0
|
||||
for _, p := range peers {
|
||||
if p.resp != nil {
|
||||
peerID := types.ID(p.resp.Header.MemberId)
|
||||
if h != p.resp.Hash {
|
||||
if crev == p.resp.CompactRevision {
|
||||
plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
|
||||
mismatch++
|
||||
} else {
|
||||
plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if p.err != nil {
|
||||
switch p.err {
|
||||
case rpctypes.ErrFutureRev:
|
||||
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
|
||||
case rpctypes.ErrCompacted:
|
||||
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
if mismatch > 0 {
|
||||
return fmt.Errorf("%s found data inconsistency with peers", s.ID())
|
||||
}
|
||||
|
||||
plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *EtcdServer) monitorKVHash() {
|
||||
t := s.Cfg.CorruptCheckTime
|
||||
if t == 0 {
|
||||
return
|
||||
}
|
||||
plog.Infof("enabled corruption checking with %s interval", t)
|
||||
for {
|
||||
select {
|
||||
case <-s.stopping:
|
||||
return
|
||||
case <-time.After(t):
|
||||
}
|
||||
if !s.isLeader() {
|
||||
continue
|
||||
}
|
||||
if err := s.checkHashKV(); err != nil {
|
||||
plog.Debugf("check hash kv failed %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *EtcdServer) checkHashKV() error {
|
||||
h, rev, crev, err := s.kv.HashByRev(0)
|
||||
if err != nil {
|
||||
plog.Fatalf("failed to hash kv store (%v)", err)
|
||||
}
|
||||
peers := s.getPeerHashKVs(rev)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
|
||||
err = s.linearizableReadNotify(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
h2, rev2, crev2, err := s.kv.HashByRev(0)
|
||||
if err != nil {
|
||||
plog.Warningf("failed to hash kv store (%v)", err)
|
||||
return err
|
||||
}
|
||||
|
||||
alarmed := false
|
||||
mismatch := func(id uint64) {
|
||||
if alarmed {
|
||||
return
|
||||
}
|
||||
alarmed = true
|
||||
a := &pb.AlarmRequest{
|
||||
MemberID: uint64(id),
|
||||
Action: pb.AlarmRequest_ACTIVATE,
|
||||
Alarm: pb.AlarmType_CORRUPT,
|
||||
}
|
||||
s.goAttach(func() {
|
||||
s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
|
||||
})
|
||||
}
|
||||
|
||||
if h2 != h && rev2 == rev && crev == crev2 {
|
||||
plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
|
||||
mismatch(uint64(s.ID()))
|
||||
}
|
||||
|
||||
for _, p := range peers {
|
||||
if p.resp == nil {
|
||||
continue
|
||||
}
|
||||
id := p.resp.Header.MemberId
|
||||
|
||||
// leader expects follower's latest revision less than or equal to leader's
|
||||
if p.resp.Header.Revision > rev2 {
|
||||
plog.Warningf(
|
||||
"revision %d from member %v, expected at most %d",
|
||||
p.resp.Header.Revision,
|
||||
types.ID(id),
|
||||
rev2)
|
||||
mismatch(id)
|
||||
}
|
||||
|
||||
// leader expects follower's latest compact revision less than or equal to leader's
|
||||
if p.resp.CompactRevision > crev2 {
|
||||
plog.Warningf(
|
||||
"compact revision %d from member %v, expected at most %d",
|
||||
p.resp.CompactRevision,
|
||||
types.ID(id),
|
||||
crev2,
|
||||
)
|
||||
mismatch(id)
|
||||
}
|
||||
|
||||
// follower's compact revision is leader's old one, then hashes must match
|
||||
		if p.resp.CompactRevision == crev && p.resp.Hash != h {
			plog.Warningf(
				"hash %d at revision %d from member %v, expected hash %d",
				p.resp.Hash,
				rev,
				types.ID(id),
				h,
			)
			mismatch(id)
		}
	}
	return nil
}

type peerHashKVResp struct {
	resp *clientv3.HashKVResponse
	err  error
	eps  []string
}

func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
	// TODO: handle the case when "s.cluster.Members" have not
	// been populated (e.g. no snapshot to load from disk)
	mbs := s.cluster.Members()
	pURLs := make([][]string, len(mbs))
	for _, m := range mbs {
		if m.ID == s.ID() {
			continue
		}
		pURLs = append(pURLs, m.PeerURLs)
	}

	for _, purls := range pURLs {
		if len(purls) == 0 {
			continue
		}
		cli, cerr := clientv3.New(clientv3.Config{
			DialTimeout: s.Cfg.ReqTimeout(),
			Endpoints:   purls,
		})
		if cerr != nil {
			plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error())
			continue
		}

		respsLen := len(resps)
		for _, c := range cli.Endpoints() {
			ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
			var resp *clientv3.HashKVResponse
			resp, cerr = cli.HashKV(ctx, c, rev)
			cancel()
			if cerr == nil {
				resps = append(resps, &peerHashKVResp{resp: resp})
				break
			}
			plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
		}
		cli.Close()

		if respsLen == len(resps) {
			resps = append(resps, &peerHashKVResp{err: cerr, eps: purls})
		}
	}
	return resps
}

type applierV3Corrupt struct {
	applierV3
}

func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }

func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
	return nil, ErrCorrupt
}

func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
	return nil, ErrCorrupt
}

func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	return nil, ErrCorrupt
}

func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
	return nil, ErrCorrupt
}

func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
	return nil, nil, ErrCorrupt
}

func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	return nil, ErrCorrupt
}

func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	return nil, ErrCorrupt
}
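The acceptance rule the leader applies to each peer response above can be isolated in a few lines. A minimal sketch under stated assumptions: peerState is a hypothetical stand-in for the fields read from a clientv3.HashKVResponse, and the leader's two snapshots (rev/crev and rev2/crev2) are collapsed into a single one.

package main

import "fmt"

// peerState is a hypothetical stand-in for the response fields checkHashKV inspects.
type peerState struct {
	Revision        int64
	CompactRevision int64
	Hash            uint32
}

// corrupted applies the three leader-side checks: a follower may not be ahead
// of the leader's revision or compact revision, and at the same compact
// revision the KV hashes must agree.
func corrupted(leaderHash uint32, leaderRev, leaderCRev int64, p peerState) bool {
	if p.Revision > leaderRev {
		return true
	}
	if p.CompactRevision > leaderCRev {
		return true
	}
	return p.CompactRevision == leaderCRev && p.Hash != leaderHash
}

func main() {
	fmt.Println(corrupted(0xabcd, 10, 5, peerState{9, 5, 0xabcd})) // false: consistent follower
	fmt.Println(corrupted(0xabcd, 10, 5, peerState{9, 5, 0xdead})) // true: hash mismatch
}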
@@ -29,11 +29,13 @@ var (
	ErrTimeoutLeaderTransfer   = errors.New("etcdserver: request timed out, leader transfer took too long")
	ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
	ErrNoLeader                = errors.New("etcdserver: no leader")
	ErrNotLeader               = errors.New("etcdserver: not leader")
	ErrRequestTooLarge         = errors.New("etcdserver: request is too large")
	ErrNoSpace                 = errors.New("etcdserver: no space")
	ErrTooManyRequests         = errors.New("etcdserver: too many requests")
	ErrUnhealthy               = errors.New("etcdserver: unhealthy cluster")
	ErrKeyNotFound             = errors.New("etcdserver: key not found")
	ErrCorrupt                 = errors.New("etcdserver: corrupt cluster")
)

type DiscoveryError struct {
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: etcdserver.proto
// DO NOT EDIT!

/*
Package etcdserverpb is a generated protocol buffer package.

@@ -32,6 +31,8 @@
	CompactionRequest
	CompactionResponse
	HashRequest
	HashKVRequest
	HashKVResponse
	HashResponse
	SnapshotRequest
	SnapshotResponse

@@ -47,6 +48,9 @@
	LeaseKeepAliveResponse
	LeaseTimeToLiveRequest
	LeaseTimeToLiveResponse
	LeaseLeasesRequest
	LeaseStatus
	LeaseLeasesResponse
	Member
	MemberAddRequest
	MemberAddResponse

@@ -58,6 +62,8 @@
	MemberListResponse
	DefragmentRequest
	DefragmentResponse
	MoveLeaderRequest
	MoveLeaderResponse
	AlarmRequest
	AlarmMember
	AlarmResponse

@@ -105,6 +111,8 @@ import (

	math "math"

	_ "github.com/gogo/protobuf/gogoproto"

	io "io"
)

@@ -311,24 +319,6 @@ func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
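The generated encodeVarint* helpers kept by this hunk (while the fixed-width encodeFixed64/32 helpers are dropped) implement protobuf base-128 varints: seven payload bits per byte, with the high bit set on every byte except the last. A standalone sketch of the same loop:

package main

import "fmt"

// encodeVarint writes v into dst at offset as a protobuf varint and
// returns the offset just past the last byte written.
func encodeVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
}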
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: raft_internal.proto
// DO NOT EDIT!

package etcdserverpb

@@ -11,6 +10,8 @@ import (

	math "math"

	_ "github.com/gogo/protobuf/gogoproto"

	io "io"
)

@@ -505,24 +506,6 @@ func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
File diff suppressed because it is too large
@@ -18,6 +18,7 @@ import (
	"time"

	"github.com/coreos/etcd/pkg/runtime"
	"github.com/coreos/etcd/version"
	"github.com/prometheus/client_golang/prometheus"
)

@@ -64,6 +65,13 @@ var (
		Name: "lease_expired_total",
		Help: "The total number of expired leases.",
	})
	currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name:      "version",
		Help:      "Which version is running. 1 for 'server_version' label with current version.",
	},
		[]string{"server_version"})
)

func init() {

@@ -74,6 +82,11 @@ func init() {
	prometheus.MustRegister(proposalsPending)
	prometheus.MustRegister(proposalsFailed)
	prometheus.MustRegister(leaseExpired)
	prometheus.MustRegister(currentVersion)

	currentVersion.With(prometheus.Labels{
		"server_version": version.Version,
	}).Set(1)
}

func monitorFileDescriptor(done <-chan struct{}) {
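The currentVersion gauge added here is the standard labeled-gauge pattern: the metric value is a constant 1 and the information rides in the label. A self-contained sketch (the version string and port are placeholders, not values from this commit):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "etcd",
	Subsystem: "server",
	Name:      "version",
	Help:      "Which version is running. 1 for 'server_version' label with current version.",
}, []string{"server_version"})

func main() {
	prometheus.MustRegister(currentVersion)
	// Set once at startup; a scrape then shows
	// etcd_server_version{server_version="3.3.0"} 1
	currentVersion.With(prometheus.Labels{"server_version": "3.3.0"}).Set(1)

	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":2112", nil)
}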
@@ -416,7 +416,7 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
	return id, n, s, w
}

func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
@@ -15,6 +15,7 @@
package etcdserver

import (
	"context"
	"encoding/json"
	"expvar"
	"fmt"

@@ -38,6 +39,7 @@ import (
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/etcdserver/stats"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/pkg/fileutil"

@@ -54,9 +56,9 @@ import (
	"github.com/coreos/etcd/store"
	"github.com/coreos/etcd/version"
	"github.com/coreos/etcd/wal"

	"github.com/coreos/go-semver/semver"
	"github.com/coreos/pkg/capnslog"
	"golang.org/x/net/context"
)

const (

@@ -108,29 +110,33 @@ func init() {
}

type Response struct {
	Term    uint64
	Index   uint64
	Event   *store.Event
	Watcher store.Watcher
	err     error
	Err     error
}

type Server interface {
	// Start performs any initialization of the Server necessary for it to
	// begin serving requests. It must be called before Do or Process.
	// Start must be non-blocking; any long-running server functionality
	// should be implemented in goroutines.
	Start()
	// Stop terminates the Server and performs any necessary finalization.
	// Do and Process cannot be called after Stop has been invoked.
	Stop()
	// ID returns the ID of the Server.
type ServerV2 interface {
	Server
	// Do takes a V2 request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	stats.Stats
	ClientCertAuthEnabled() bool
}

type ServerV3 interface {
	Server
	ID() types.ID
	RaftTimer
}

func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }

type Server interface {
	// Leader returns the ID of the leader Server.
	Leader() types.ID
	// Do takes a request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	// Process takes a raft message and applies it to the server's raft state
	// machine, respecting any timeout of the given context.
	Process(ctx context.Context, m raftpb.Message) error

	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDExists if member ID exists in the cluster.

@@ -139,7 +145,6 @@ type Server interface {
	// return ErrIDRemoved if member ID is removed from the cluster, or return
	// ErrIDNotFound if member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)

	// UpdateMember attempts to update an existing member in the cluster. It will
	// return ErrIDNotFound if the member ID does not exist.
	UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)

@@ -159,6 +164,8 @@ type Server interface {
	// the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
	// this feature is introduced post 2.0.
	ClusterVersion() *semver.Version
	Cluster() api.Cluster
	Alarms() []*pb.AlarmMember
}

// EtcdServer is the production implementation of the Server interface

@@ -514,9 +521,10 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
	return srv, nil
}

// Start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// It also starts a goroutine to publish its server information.
// Start performs any initialization of the Server necessary for it to
// begin serving requests. It must be called before Do or Process.
// Start must be non-blocking; any long-running server functionality
// should be implemented in goroutines.
func (s *EtcdServer) Start() {
	s.start()
	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })

@@ -524,6 +532,7 @@ func (s *EtcdServer) Start() {
	s.goAttach(func() { monitorFileDescriptor(s.stopping) })
	s.goAttach(s.monitorVersions)
	s.goAttach(s.linearizableReadLoop)
	s.goAttach(s.monitorKVHash)
}

// start prepares and starts server in a new goroutine. It is no longer safe to

@@ -575,14 +584,27 @@ func (s *EtcdServer) purgeFile() {

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster }

func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }

func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor }
func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }

func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }

type ServerPeer interface {
	ServerV2
	RaftHandler() http.Handler
	LeaseHandler() http.Handler
}

func (s *EtcdServer) LeaseHandler() http.Handler {
	if s.lessor == nil {
		return nil
	}
	return leasehttp.NewHandler(s.lessor, s.ApplyWait)
}

func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }

// Process takes a raft message and applies it to the server's raft state
// machine, respecting any timeout of the given context.
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.cluster.IsIDRemoved(types.ID(m.From)) {
		plog.Warningf("reject message from removed member %s", types.ID(m.From).String())

@@ -748,7 +770,9 @@ func (s *EtcdServer) run() {
	lid := lease.ID
	s.goAttach(func() {
		ctx := s.authStore.WithRoot(s.ctx)
		s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
		if _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}); lerr != nil {
			plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
		}
		leaseExpired.Inc()
		<-c
	})

@@ -932,9 +956,8 @@ func (s *EtcdServer) isLeader() bool {
	return uint64(s.ID()) == s.Lead()
}

// transferLeadership transfers the leader to the given transferee.
// TODO: maybe expose to client?
func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error {
// MoveLeader transfers the leader to the given transferee.
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
	now := time.Now()
	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

@@ -973,7 +996,7 @@ func (s *EtcdServer) TransferLeadership() error {

	tm := s.Cfg.ReqTimeout()
	ctx, cancel := context.WithTimeout(s.ctx, tm)
	err := s.transferLeadership(ctx, s.Lead(), uint64(transferee))
	err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
	cancel()
	return err
}

@@ -992,6 +1015,8 @@ func (s *EtcdServer) HardStop() {
// Stop should be called after a Start(s), otherwise it will block forever.
// When stopping leader, Stop transfers its leadership to one of its peers
// before stopping the server.
// Stop terminates the Server and performs any necessary finalization.
// Do and Process cannot be called after Stop has been invoked.
func (s *EtcdServer) Stop() {
	if err := s.TransferLeadership(); err != nil {
		plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
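The hunk above splits one large interface into a core Server plus ServerV2/ServerV3 extensions that embed it, so a single concrete type can satisfy all three. A minimal sketch with illustrative methods (not the vendored signatures):

package main

import "fmt"

type Server interface {
	Leader() uint64
}

type ServerV2 interface {
	Server
	DoV2(req string) (string, error)
}

type ServerV3 interface {
	Server
	ID() uint64
}

// etcdLike satisfies all three interfaces at once.
type etcdLike struct{ id, lead uint64 }

func (s *etcdLike) Leader() uint64                  { return s.lead }
func (s *etcdLike) DoV2(req string) (string, error) { return "ok:" + req, nil }
func (s *etcdLike) ID() uint64                      { return s.id }

func main() {
	s := &etcdLike{id: 1, lead: 2}
	var v2 ServerV2 = s
	var v3 ServerV3 = s
	fmt.Println(v2.Leader(), v3.ID())
}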
@@ -1322,12 +1347,13 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
	var raftReq pb.InternalRaftRequest
	if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
		var r pb.Request
		pbutil.MustUnmarshal(&r, e.Data)
		s.w.Trigger(r.ID, s.applyV2Request(&r))
		rp := &r
		pbutil.MustUnmarshal(rp, e.Data)
		s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
		return
	}
	if raftReq.V2 != nil {
		req := raftReq.V2
		req := (*RequestV2)(raftReq.V2)
		s.w.Trigger(req.ID, s.applyV2Request(req))
		return
	}

@@ -1367,8 +1393,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
			Action: pb.AlarmRequest_ACTIVATE,
			Alarm:  pb.AlarmType_NOSPACE,
		}
		r := pb.InternalRaftRequest{Alarm: a}
		s.processInternalRaftRequest(s.ctx, r)
		s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
		s.w.Trigger(id, ar)
	})
}

@@ -1630,6 +1655,9 @@ func (s *EtcdServer) restoreAlarms() error {
	if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.applyV3 = newApplierV3Capped(s.applyV3)
	}
	if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
		s.applyV3 = newApplierV3Corrupt(s.applyV3)
	}
	return nil
}

@@ -1668,3 +1696,7 @@ func (s *EtcdServer) goAttach(f func()) {
		f()
	}()
}

func (s *EtcdServer) Alarms() []*pb.AlarmMember {
	return s.alarmStore.Get(pb.AlarmType_NONE)
}
@@ -94,5 +94,5 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
	pbutil.MustUnmarshal(&metadata, wmetadata)
	id = types.ID(metadata.NodeID)
	cid = types.ID(metadata.ClusterID)
	return
	return w, id, cid, st, ents
}
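The readWAL change above swaps a bare return of named results for an explicit one; both compile to the same thing, the explicit form just documents what comes back at the return site. A toy illustration:

package main

import "fmt"

// split has named results, so a bare `return` would also be legal;
// the explicit form mirrors the readWAL change.
func split(s string) (head, tail string) {
	if s == "" {
		return head, tail
	}
	return s[:1], s[1:]
}

func main() {
	fmt.Println(split("wal")) // w al
}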
@@ -15,41 +15,86 @@
package etcdserver

import (
	"context"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"github.com/coreos/etcd/store"
)

type v2API interface {
	Post(ctx context.Context, r *pb.Request) (Response, error)
	Put(ctx context.Context, r *pb.Request) (Response, error)
	Delete(ctx context.Context, r *pb.Request) (Response, error)
	QGet(ctx context.Context, r *pb.Request) (Response, error)
	Get(ctx context.Context, r *pb.Request) (Response, error)
	Head(ctx context.Context, r *pb.Request) (Response, error)
type RequestV2 pb.Request

type RequestV2Handler interface {
	Post(ctx context.Context, r *RequestV2) (Response, error)
	Put(ctx context.Context, r *RequestV2) (Response, error)
	Delete(ctx context.Context, r *RequestV2) (Response, error)
	QGet(ctx context.Context, r *RequestV2) (Response, error)
	Get(ctx context.Context, r *RequestV2) (Response, error)
	Head(ctx context.Context, r *RequestV2) (Response, error)
}

type v2apiStore struct{ s *EtcdServer }
type reqV2HandlerEtcdServer struct {
	reqV2HandlerStore
	s *EtcdServer
}

func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) {
type reqV2HandlerStore struct {
	store   store.Store
	applier ApplierV2
}

func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler {
	return &reqV2HandlerStore{s, applier}
}

func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
	return a.applier.Post(r), nil
}

func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
	return a.applier.Put(r), nil
}

func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
	return a.applier.Delete(r), nil
}

func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
	return a.applier.QGet(r), nil
}

func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
	if r.Wait {
		wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
		return Response{Watcher: wc}, err
	}
	ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
	return Response{Event: ev}, err
}

func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
	ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
	return Response{Event: ev}, err
}

func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) {
func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) {
func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) {
func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) {
	data, err := r.Marshal()
func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
	data, err := ((*pb.Request)(r)).Marshal()
	if err != nil {
		return Response{}, err
	}

@@ -63,7 +108,7 @@ func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Res
	select {
	case x := <-ch:
		resp := x.(Response)
		return resp, resp.err
		return resp, resp.Err
	case <-ctx.Done():
		proposalsFailed.Inc()
		a.s.w.Trigger(r.ID, nil) // GC wait

@@ -73,53 +118,43 @@ func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Res
	return Response{}, ErrStopped
}

func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) {
	if r.Wait {
		wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
		if err != nil {
			return Response{}, err
		}
		return Response{Watcher: wc}, nil
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
	h := &reqV2HandlerEtcdServer{
		reqV2HandlerStore: reqV2HandlerStore{
			store:   s.store,
			applier: s.applyV2,
		},
		s: s,
	}
	ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted)
	if err != nil {
		return Response{}, err
	}
	return Response{Event: ev}, nil
	rp := &r
	resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
	resp.Term, resp.Index = s.Term(), s.Index()
	return resp, err
}

func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) {
	ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted)
	if err != nil {
		return Response{}, err
	}
	return Response{Event: ev}, nil
}

// Do interprets r and performs an operation on s.store according to r.Method
// Handle interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	v2api := (v2API)(&v2apiStore{s})
	switch r.Method {
	case "POST":
		return v2api.Post(ctx, &r)
		return v2api.Post(ctx, r)
	case "PUT":
		return v2api.Put(ctx, &r)
		return v2api.Put(ctx, r)
	case "DELETE":
		return v2api.Delete(ctx, &r)
		return v2api.Delete(ctx, r)
	case "QGET":
		return v2api.QGet(ctx, &r)
		return v2api.QGet(ctx, r)
	case "GET":
		return v2api.Get(ctx, &r)
		return v2api.Get(ctx, r)
	case "HEAD":
		return v2api.Head(ctx, &r)
		return v2api.Head(ctx, r)
	}
	return Response{}, ErrUnknownMethod
}
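The RequestV2.Handle rewrite above turns method dispatch into a plain switch over an interface, so the same request type can be served by a raft-backed handler or a store-only handler. A stripped-down model with placeholder types (not the vendored ones):

package main

import (
	"context"
	"errors"
	"fmt"
)

type Response struct{ Body string }

type Handler interface {
	Get(ctx context.Context, path string) (Response, error)
	Put(ctx context.Context, path, val string) (Response, error)
}

type Request struct {
	Method, Path, Val string
}

var ErrUnknownMethod = errors.New("unknown method")

// Handle dispatches on Method, mirroring the switch in the diff above.
func (r *Request) Handle(ctx context.Context, h Handler) (Response, error) {
	switch r.Method {
	case "GET":
		return h.Get(ctx, r.Path)
	case "PUT":
		return h.Put(ctx, r.Path, r.Val)
	}
	return Response{}, ErrUnknownMethod
}

type memHandler struct{ data map[string]string }

func (m *memHandler) Get(_ context.Context, p string) (Response, error) {
	return Response{m.data[p]}, nil
}

func (m *memHandler) Put(_ context.Context, p, v string) (Response, error) {
	m.data[p] = v
	return Response{"ok"}, nil
}

func main() {
	h := &memHandler{data: map[string]string{}}
	(&Request{"PUT", "/k", "v"}).Handle(context.Background(), h)
	resp, _ := (&Request{Method: "GET", Path: "/k"}).Handle(context.Background(), h)
	fmt.Println(resp.Body) // v
}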
@@ -16,6 +16,7 @@ package etcdserver

import (
	"bytes"
	"context"
	"encoding/binary"
	"time"

@@ -27,7 +28,7 @@ import (
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/raft"

	"golang.org/x/net/context"
	"github.com/gogo/protobuf/proto"
)

const (

@@ -58,6 +59,9 @@ type Lessor interface {

	// LeaseTimeToLive retrieves lease information.
	LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)

	// LeaseLeases lists all leases.
	LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
}

type Authenticator interface {

@@ -99,25 +103,19 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
}

func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.PutResponse), nil
	return resp.(*pb.PutResponse), nil
}

func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.DeleteRangeResponse), nil
	return resp.(*pb.DeleteRangeResponse), nil
}

func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {

@@ -139,14 +137,11 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
		}
		return resp, err
	}
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.TxnResponse), nil
	return resp.(*pb.TxnResponse), nil
}

func isTxnSerializable(r *pb.TxnRequest) bool {

@@ -211,25 +206,19 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*
		// only use positive int64 id's
		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
	}
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.LeaseGrantResponse), nil
	return resp.(*pb.LeaseGrantResponse), nil
}

func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.LeaseRevokeResponse), nil
	return resp.(*pb.LeaseRevokeResponse), nil
}

func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {

@@ -304,6 +293,15 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR
	return nil, ErrTimeout
}

func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
	ls := s.lessor.Leases()
	lss := make([]*pb.LeaseStatus, len(ls))
	for i := range ls {
		lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
	}
	return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
}

func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
	leader := s.cluster.Member(s.Leader())
	for leader == nil {

@@ -325,46 +323,35 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error)
}

func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AlarmResponse), nil
	return resp.(*pb.AlarmResponse), nil
}

func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthEnableResponse), nil
	return resp.(*pb.AuthEnableResponse), nil
}

func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthDisableResponse), nil
	return resp.(*pb.AuthDisableResponse), nil
}

func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
	var result *applyResult

	err := s.linearizableReadNotify(ctx)
	if err != nil {
	if err := s.linearizableReadNotify(ctx); err != nil {
		return nil, err
	}

	var resp proto.Message
	for {
		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
		if err != nil {

@@ -385,166 +372,141 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
			SimpleToken: st,
		}

		result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
		resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
		if err != nil {
			return nil, err
		}
		if result.err != nil {
			return nil, result.err
		if checkedRevision == s.AuthStore().Revision() {
			break
		}

		if checkedRevision != s.AuthStore().Revision() {
			plog.Infof("revision when password checked is obsolete, retrying")
			continue
		}

		break
		plog.Infof("revision when password checked is obsolete, retrying")
	}

	return result.resp.(*pb.AuthenticateResponse), nil
	return resp.(*pb.AuthenticateResponse), nil
}

func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserAddResponse), nil
	return resp.(*pb.AuthUserAddResponse), nil
}

func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserDeleteResponse), nil
	return resp.(*pb.AuthUserDeleteResponse), nil
}

func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserChangePasswordResponse), nil
	return resp.(*pb.AuthUserChangePasswordResponse), nil
}

func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserGrantRoleResponse), nil
	return resp.(*pb.AuthUserGrantRoleResponse), nil
}

func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserGetResponse), nil
	return resp.(*pb.AuthUserGetResponse), nil
}

func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserListResponse), nil
	return resp.(*pb.AuthUserListResponse), nil
}

func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthUserRevokeRoleResponse), nil
	return resp.(*pb.AuthUserRevokeRoleResponse), nil
}

func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleAddResponse), nil
	return resp.(*pb.AuthRoleAddResponse), nil
}

func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil
	return resp.(*pb.AuthRoleGrantPermissionResponse), nil
}

func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleGetResponse), nil
	return resp.(*pb.AuthRoleGetResponse), nil
}

func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleListResponse), nil
	return resp.(*pb.AuthRoleListResponse), nil
}

func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil
	return resp.(*pb.AuthRoleRevokePermissionResponse), nil
}

func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
	result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleDeleteResponse), nil
}

func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, r)
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp.(*pb.AuthRoleDeleteResponse), nil
	return result.resp, nil
}

func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	for {
		resp, err := s.raftRequestOnce(ctx, r)
		if err != auth.ErrAuthOldRevision {
			return resp, err
		}
	}
}

// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.

@@ -629,19 +591,6 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
	}
}

func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	var result *applyResult
	var err error
	for {
		result, err = s.processInternalRaftRequestOnce(ctx, r)
		if err != auth.ErrAuthOldRevision {
			break
		}
	}

	return result, err
}

// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }

@@ -737,12 +686,14 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
}

func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
	if s.Cfg.ClientCertAuthEnabled {
		authInfo := s.AuthStore().AuthInfoFromTLS(ctx)
		if authInfo != nil {
			return authInfo, nil
		}
	authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
	if authInfo != nil || err != nil {
		return authInfo, err
	}
	if !s.Cfg.ClientCertAuthEnabled {
		return nil, nil
	}
	authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
	return authInfo, nil

	return s.AuthStore().AuthInfoFromCtx(ctx)
}
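raftRequest above retries raftRequestOnce only on auth.ErrAuthOldRevision and returns immediately on success or on any other failure. The loop in isolation, with a stand-in error:

package main

import (
	"errors"
	"fmt"
)

// errOldRevision stands in for auth.ErrAuthOldRevision.
var errOldRevision = errors.New("auth: revision in header is old")

func main() {
	attempts := 0
	once := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errOldRevision
		}
		return "ok", nil
	}

	// retry only on the stale-auth-revision error, as raftRequest does
	var resp string
	var err error
	for {
		resp, err = once()
		if err != errOldRevision {
			break
		}
	}
	fmt.Println(resp, err, attempts) // ok <nil> 3
}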
@@ -24,10 +24,12 @@ import (
type index interface {
	Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
	Range(key, end []byte, atRev int64) ([][]byte, []revision)
	Revisions(key, end []byte, atRev int64) []revision
	Put(key []byte, rev revision)
	Tombstone(key []byte, rev revision) error
	RangeSince(key, end []byte, rev int64) []revision
	Compact(rev int64) map[revision]struct{}
	Keep(rev int64) map[revision]struct{}
	Equal(b index) bool

	Insert(ki *keyIndex)

@@ -83,17 +85,8 @@ func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
	return nil
}

func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
	if end == nil {
		rev, _, _, err := ti.Get(key, atRev)
		if err != nil {
			return nil, nil
		}
		return [][]byte{key}, []revision{rev}
	}

	keyi := &keyIndex{key: key}
	endi := &keyIndex{key: end}
func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
	keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}

	ti.RLock()
	defer ti.RUnlock()

@@ -102,16 +95,41 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []
		if len(endi.key) > 0 && !item.Less(endi) {
			return false
		}
		curKeyi := item.(*keyIndex)
		rev, _, _, err := curKeyi.get(atRev)
		if err != nil {
			return true
		}
		revs = append(revs, rev)
		keys = append(keys, curKeyi.key)
		f(item.(*keyIndex))
		return true
	})
}

func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
	if end == nil {
		rev, _, _, err := ti.Get(key, atRev)
		if err != nil {
			return nil
		}
		return []revision{rev}
	}
	ti.visit(key, end, func(ki *keyIndex) {
		if rev, _, _, err := ki.get(atRev); err == nil {
			revs = append(revs, rev)
		}
	})
	return revs
}

func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
	if end == nil {
		rev, _, _, err := ti.Get(key, atRev)
		if err != nil {
			return nil, nil
		}
		return [][]byte{key}, []revision{rev}
	}
	ti.visit(key, end, func(ki *keyIndex) {
		if rev, _, _, err := ki.get(atRev); err == nil {
			revs = append(revs, rev)
			keys = append(keys, ki.key)
		}
	})
	return keys, revs
}

@@ -133,10 +151,11 @@ func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
// at or after the given rev. The returned slice is sorted in the order
// of revision.
func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
	keyi := &keyIndex{key: key}

	ti.RLock()
	defer ti.RUnlock()

	keyi := &keyIndex{key: key}
	if end == nil {
		item := ti.tree.Get(keyi)
		if item == nil {

@@ -179,6 +198,19 @@ func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
	return available
}

// Keep finds all revisions to be kept for a Compaction at the given rev.
func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
	available := make(map[revision]struct{})
	ti.RLock()
	defer ti.RUnlock()
	ti.tree.Ascend(func(i btree.Item) bool {
		keyi := i.(*keyIndex)
		keyi.keep(rev, available)
		return true
	})
	return available
}

func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool {
	return func(i btree.Item) bool {
		keyi := i.(*keyIndex)

@@ -190,16 +222,16 @@ func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyInd
	}
}

func (a *treeIndex) Equal(bi index) bool {
func (ti *treeIndex) Equal(bi index) bool {
	b := bi.(*treeIndex)

	if a.tree.Len() != b.tree.Len() {
	if ti.tree.Len() != b.tree.Len() {
		return false
	}

	equal := true

	a.tree.Ascend(func(item btree.Item) bool {
	ti.tree.Ascend(func(item btree.Item) bool {
		aki := item.(*keyIndex)
		bki := b.tree.Get(item).(*keyIndex)
		if !aki.equal(bki) {
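The visit refactor above lets Range and Revisions share one bounded tree traversal and differ only in what their callbacks collect. A minimal model over a sorted slice instead of a btree:

package main

import "fmt"

type revision struct{ main, sub int64 }

type entry struct {
	key string
	rev revision
}

// index holds entries sorted by key, standing in for the btree.
type index struct{ entries []entry }

// visit calls f for every entry with a key in [key, end), as treeIndex.visit does.
func (ix *index) visit(key, end string, f func(e entry)) {
	for _, e := range ix.entries {
		if e.key >= key && e.key < end {
			f(e)
		}
	}
}

func (ix *index) Revisions(key, end string) (revs []revision) {
	ix.visit(key, end, func(e entry) { revs = append(revs, e.rev) })
	return revs
}

func (ix *index) Range(key, end string) (keys []string, revs []revision) {
	ix.visit(key, end, func(e entry) {
		keys = append(keys, e.key)
		revs = append(revs, e.rev)
	})
	return keys, revs
}

func main() {
	ix := &index{entries: []entry{{"a", revision{1, 0}}, {"b", revision{2, 0}}, {"c", revision{3, 0}}}}
	fmt.Println(ix.Revisions("a", "c")) // [{1 0} {2 0}]
}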
@@ -46,7 +46,7 @@ var (
// rev except the largest one. If the generation becomes empty
// during compaction, it will be removed. if all the generations get
// removed, the keyIndex should be removed.

//
// For example:
// compact(2) on the previous example
// generations:

@@ -187,9 +187,44 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
		plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
	}

	// walk until reaching the first revision that has a revision smaller than or
	// equal to the given atRev, and add it to the available map
	genIdx, revIndex := ki.doCompact(atRev, available)

	g := &ki.generations[genIdx]
	if !g.isEmpty() {
		// remove the previous contents.
		if revIndex != -1 {
			g.revs = g.revs[revIndex:]
		}
		// remove any tombstone
		if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
			delete(available, g.revs[0])
			genIdx++
		}
	}

	// remove the previous generations.
	ki.generations = ki.generations[genIdx:]
}

// keep finds the revision to be kept if compact is called at the given atRev.
func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
	if ki.isEmpty() {
		return
	}

	genIdx, revIndex := ki.doCompact(atRev, available)
	g := &ki.generations[genIdx]
	if !g.isEmpty() {
		// remove any tombstone
		if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
			delete(available, g.revs[revIndex])
		}
	}
}

func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
	// walk until reaching the first revision smaller or equal to "atRev",
	// and add the revision to the available map
	f := func(rev revision) bool {
		if rev.main <= atRev {
			available[rev] = struct{}{}

@@ -198,30 +233,19 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
		return true
	}

	i, g := 0, &ki.generations[0]
	genIdx, g := 0, &ki.generations[0]
	// find first generation includes atRev or created after atRev
	for i < len(ki.generations)-1 {
	for genIdx < len(ki.generations)-1 {
		if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
			break
		}
		i++
		g = &ki.generations[i]
		genIdx++
		g = &ki.generations[genIdx]
	}

	if !g.isEmpty() {
		n := g.walk(f)
		// remove the previous contents.
		if n != -1 {
			g.revs = g.revs[n:]
		}
		// remove any tombstone
		if len(g.revs) == 1 && i != len(ki.generations)-1 {
			delete(available, g.revs[0])
			i++
		}
	}
	// remove the previous generations.
	ki.generations = ki.generations[i:]
	revIndex = g.walk(f)

	return genIdx, revIndex
}

func (ki *keyIndex) isEmpty() bool {
@@ -107,10 +107,12 @@ type KV interface {
	// Write creates a write transaction.
	Write() TxnWrite

	// Hash retrieves the hash of KV state and revision.
	// This method is designed for consistency checking purposes.
	// Hash computes the hash of the KV's backend.
	Hash() (hash uint32, revision int64, err error)

	// HashByRev computes the hash of all MVCC revisions up to a given revision.
	HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)

	// Compact frees all superseded keys with revisions less than rev.
	Compact(rev int64) (<-chan struct{}, error)
@@ -15,8 +15,10 @@
package mvcc

import (
	"context"
	"encoding/binary"
	"errors"
	"hash/crc32"
	"math"
	"sync"
	"sync/atomic"

@@ -27,7 +29,6 @@ import (
	"github.com/coreos/etcd/mvcc/mvccpb"
	"github.com/coreos/etcd/pkg/schedule"
	"github.com/coreos/pkg/capnslog"
	"golang.org/x/net/context"
)

var (

@@ -160,6 +161,54 @@ func (s *store) Hash() (hash uint32, revision int64, err error) {
	return h, s.currentRev, err
}

func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
	s.mu.RLock()
	s.revMu.RLock()
	compactRev, currentRev = s.compactMainRev, s.currentRev
	s.revMu.RUnlock()

	if rev > 0 && rev <= compactRev {
		s.mu.RUnlock()
		return 0, 0, compactRev, ErrCompacted
	} else if rev > 0 && rev > currentRev {
		s.mu.RUnlock()
		return 0, currentRev, 0, ErrFutureRev
	}

	if rev == 0 {
		rev = currentRev
	}
	keep := s.kvindex.Keep(rev)

	tx := s.b.ReadTx()
	tx.Lock()
	defer tx.Unlock()
	s.mu.RUnlock()

	upper := revision{main: rev + 1}
	lower := revision{main: compactRev + 1}
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

	h.Write(keyBucketName)
	err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
		kr := bytesToRev(k)
		if !upper.GreaterThan(kr) {
			return nil
		}
		// skip revisions that are scheduled for deletion
		// due to compacting; don't skip if there isn't one.
		if lower.GreaterThan(kr) && len(keep) > 0 {
			if _, ok := keep[kr]; !ok {
				return nil
			}
		}
		h.Write(k)
		h.Write(v)
		return nil
	})
	return h.Sum32(), currentRev, compactRev, err
}

func (s *store) Compact(rev int64) (<-chan struct{}, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

@@ -278,6 +327,7 @@ func (s *store) restore() error {
	}

	// index keys concurrently as they're loaded in from tx
	keysGauge.Set(0)
	rkvc, revc := restoreIntoIndex(s.kvindex)
	for {
		keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))

@@ -445,16 +495,3 @@ func appendMarkTombstone(b []byte) []byte {
func isTombstone(b []byte) bool {
	return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
}

// revBytesRange returns the range of revision bytes at
// the given revision.
func revBytesRange(rev revision) (start, end []byte) {
	start = newRevBytes()
	revToBytes(rev, start)

	end = newRevBytes()
	endRev := revision{main: rev.main, sub: rev.sub + 1}
	revToBytes(endRev, end)

	return start, end
}
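The hash HashByRev computes above is a CRC-32C over the key bucket name followed by each surviving key/value pair in order. The same digest in isolation, over plain byte-slice pairs:

package main

import (
	"fmt"
	"hash/crc32"
)

// hashKVs folds the bucket name and each key/value pair into a
// CRC-32 (Castagnoli), matching the digest construction in HashByRev.
func hashKVs(bucket []byte, pairs [][2][]byte) uint32 {
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	h.Write(bucket)
	for _, kv := range pairs {
		h.Write(kv[0])
		h.Write(kv[1])
	}
	return h.Sum32()
}

func main() {
	pairs := [][2][]byte{
		{[]byte("k1"), []byte("v1")},
		{[]byte("k2"), []byte("v2")},
	}
	fmt.Printf("%08x\n", hashKVs([]byte("key"), pairs))
}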
@@ -22,6 +22,8 @@ import (
func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
	totalStart := time.Now()
	defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond))
	keyCompactions := 0
	defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()

	end := make([]byte, 8)
	binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))

@@ -40,6 +42,7 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
		rev = bytesToRev(key)
		if _, ok := keep[rev]; !ok {
			tx.UnsafeDelete(keyBucketName, key)
			keyCompactions++
		}
	}
@@ -120,7 +120,7 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions
		return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
	}

	_, revpairs := tr.s.kvindex.Range(key, end, int64(rev))
	revpairs := tr.s.kvindex.Revisions(key, end, int64(rev))
	if len(revpairs) == 0 {
		return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
	}

@@ -128,22 +128,22 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions
		return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
	}

	var kvs []mvccpb.KeyValue
	for _, revpair := range revpairs {
		start, end := revBytesRange(revpair)
		_, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0)
	limit := int(ro.Limit)
	if limit <= 0 || limit > len(revpairs) {
		limit = len(revpairs)
	}

	kvs := make([]mvccpb.KeyValue, limit)
	revBytes := newRevBytes()
	for i, revpair := range revpairs[:len(kvs)] {
		revToBytes(revpair, revBytes)
		_, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
		if len(vs) != 1 {
			plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
		}

		var kv mvccpb.KeyValue
		if err := kv.Unmarshal(vs[0]); err != nil {
		if err := kvs[i].Unmarshal(vs[0]); err != nil {
			plog.Fatalf("cannot unmarshal event: %v", err)
		}
		kvs = append(kvs, kv)
		if ro.Limit > 0 && len(kvs) >= int(ro.Limit) {
			break
		}
	}
	return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
}
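The rangeKeys rewrite above clamps the limit once and allocates the result slice up front instead of appending and breaking mid-loop. The clamping rule by itself:

package main

import "fmt"

// clampLimit mirrors the bound used for the kvs allocation: non-positive
// or oversized limits fall back to the full result count.
func clampLimit(limit, n int) int {
	if limit <= 0 || limit > n {
		return n
	}
	return limit
}

func main() {
	fmt.Println(clampLimit(0, 5), clampLimit(3, 5), clampLimit(10, 5)) // 5 3 5
}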
@ -131,6 +131,14 @@ var (
|
|||
Buckets: prometheus.ExponentialBuckets(100, 2, 14),
|
||||
})
|
||||
|
||||
dbCompactionKeysCounter = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "etcd_debugging",
|
||||
Subsystem: "mvcc",
|
||||
Name: "db_compaction_keys_total",
|
||||
Help: "Total number of db keys compacted.",
|
||||
})
|
||||
|
||||
dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
|
||||
Namespace: "etcd_debugging",
|
||||
Subsystem: "mvcc",
|
||||
|
|
@ -162,6 +170,7 @@ func init() {
    prometheus.MustRegister(indexCompactionPauseDurations)
    prometheus.MustRegister(dbCompactionPauseDurations)
    prometheus.MustRegister(dbCompactionTotalDurations)
    prometheus.MustRegister(dbCompactionKeysCounter)
    prometheus.MustRegister(dbTotalSize)
}

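For these collectors to be visible they must be registered (as in init above) and exposed over HTTP. A self-contained sketch using the standard client_golang promhttp handler; the metric name is reused for illustration and the port is arbitrary:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

// Same counter shape as the diff, registered against the default registry.
var keysCompacted = prometheus.NewCounter(prometheus.CounterOpts{
    Namespace: "etcd_debugging",
    Subsystem: "mvcc",
    Name:      "db_compaction_keys_total",
    Help:      "Total number of db keys compacted.",
})

func main() {
    prometheus.MustRegister(keysCompacted)
    keysCompacted.Add(42)
    http.Handle("/metrics", promhttp.Handler())
    http.ListenAndServe(":2112", nil) // port is arbitrary
}
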
@ -1,6 +1,5 @@
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: kv.proto
// DO NOT EDIT!

/*
Package mvccpb is a generated protocol buffer package.

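The header change above follows the Go convention for marking generated files: tooling matches a single first line against a fixed pattern, which the old trailing "// DO NOT EDIT!" line did not satisfy. A quick check of the new header against the conventional pattern (my reading of the golang.org/issue/13560 proposal):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    re := regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)
    fmt.Println(re.MatchString("// Code generated by protoc-gen-gogo. DO NOT EDIT.")) // true
}
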
@ -21,6 +20,8 @@ import (

    math "math"

    _ "github.com/gogo/protobuf/gogoproto"

    io "io"
)

@ -198,24 +199,6 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) {
    return i, nil
}

func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    dAtA[offset+4] = uint8(v >> 32)
    dAtA[offset+5] = uint8(v >> 40)
    dAtA[offset+6] = uint8(v >> 48)
    dAtA[offset+7] = uint8(v >> 56)
    return offset + 8
}
func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    return offset + 4
}
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        dAtA[offset] = uint8(v&0x7f | 0x80)

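Only the fixed-width helpers are dropped in the hunk above (the regenerated protoc-gen-gogo output apparently no longer emits them); the varint encoder remains. For reference, the same varint wire format via the standard library:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    buf := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(buf, 300)
    fmt.Printf("% x\n", buf[:n]) // ac 02, the same wire bytes encodeVarintKv emits
}
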
@ -144,7 +144,6 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c
func (s *watchableStore) cancelWatcher(wa *watcher) {
    for {
        s.mu.Lock()

        if s.unsynced.delete(wa) {
            slowWatcherGauge.Dec()
            break

@ -152,6 +151,9 @@ func (s *watchableStore) cancelWatcher(wa *watcher) {
            break
        } else if wa.compacted {
            break
        } else if wa.ch == nil {
            // already canceled (e.g., cancel/close race)
            break
        }

        if !wa.victim {

@ -177,9 +179,25 @@ func (s *watchableStore) cancelWatcher(wa *watcher) {
    }

    watcherGauge.Dec()
    wa.ch = nil
    s.mu.Unlock()
}

func (s *watchableStore) Restore(b backend.Backend) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    err := s.store.Restore(b)
    if err != nil {
        return err
    }

    for wa := range s.synced.watchers {
        s.unsynced.watchers.add(wa)
    }
    s.synced = newWatcherGroup()
    return nil
}

// syncWatchersLoop syncs the watcher in the unsynced map every 100ms.
func (s *watchableStore) syncWatchersLoop() {
    defer s.wg.Done()

@ -410,7 +428,6 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
        if eb.revs != 1 {
            plog.Panicf("unexpected multiple revisions in notification")
        }

        if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
            pendingEventsGauge.Add(float64(len(eb.evs)))
        } else {

@ -129,16 +129,25 @@ func (ws *watchStream) Chan() <-chan WatchResponse {
func (ws *watchStream) Cancel(id WatchID) error {
    ws.mu.Lock()
    cancel, ok := ws.cancels[id]
    w := ws.watchers[id]
    ok = ok && !ws.closed
    if ok {
        delete(ws.cancels, id)
        delete(ws.watchers, id)
    }
    ws.mu.Unlock()

    if !ok {
        return ErrWatcherNotExist
    }
    cancel()

    ws.mu.Lock()
    // The watch isn't removed until cancel so that if Close() is called,
    // it will wait for the cancel. Otherwise, Close() could close the
    // watch channel while the store is still posting events.
    if ww := ws.watchers[id]; ww == w {
        delete(ws.cancels, id)
        delete(ws.watchers, id)
    }
    ws.mu.Unlock()

    return nil
}

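As I read the new Cancel above: the watcher entry now stays in the maps until cancel() returns, and removal re-checks that the id still maps to the same watcher, so a concurrent Close() cannot close the channel while the store is still posting events (per the comment in the hunk). The compare-then-delete shape in isolation, with hypothetical names:

package main

import (
    "fmt"
    "sync"
)

type watcher struct{ id int }

// removeIfSame deletes the map entry only if it still points at the watcher
// we canceled, the same re-check the new Cancel performs.
func removeIfSame(mu *sync.Mutex, m map[int]*watcher, id int, w *watcher) {
    mu.Lock()
    defer mu.Unlock()
    if cur := m[id]; cur == w {
        delete(m, id)
    }
}

func main() {
    var mu sync.Mutex
    w := &watcher{id: 1}
    m := map[int]*watcher{1: w}
    removeIfSame(&mu, m, 1, w)
    fmt.Println(len(m)) // 0
}
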
@ -92,15 +92,19 @@ func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error)
}

func resolveURL(ctx context.Context, u url.URL) (string, error) {
    if u.Scheme == "unix" || u.Scheme == "unixs" {
        // unix sockets don't resolve over TCP
        return "", nil
    }
    host, _, err := net.SplitHostPort(u.Host)
    if err != nil {
        plog.Errorf("could not parse url %s during tcp resolving", u.Host)
        return "", err
    }
    if host == "localhost" || net.ParseIP(host) != nil {
        return "", nil
    }
    for ctx.Err() == nil {
        host, _, err := net.SplitHostPort(u.Host)
        if err != nil {
            plog.Errorf("could not parse url %s during tcp resolving", u.Host)
            return "", err
        }
        if host == "localhost" || net.ParseIP(host) != nil {
            return "", nil
        }
        tcpAddr, err := resolveTCPAddr(ctx, u.Host)
        if err == nil {
            plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())

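Two fixes are visible in this hunk: unix/unixs URLs now short-circuit (they do not resolve over TCP), and the host parsing plus the localhost/IP check moved inside the retry loop. The stdlib behavior those checks rely on, for reference:

package main

import (
    "fmt"
    "net"
)

func main() {
    host, port, err := net.SplitHostPort("localhost:2379")
    fmt.Println(host, port, err)          // localhost 2379 <nil>
    fmt.Println(net.ParseIP(host) != nil) // false: a named host still needs resolving
}
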
@ -246,5 +246,5 @@ func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error
    if oif == 0 {
        err = errNoDefaultRoute
    }
    return
    return host, oif, err
}

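This hunk, and the pkg/report and pkg/types hunks further below, swap naked returns for explicit ones; with named results the behavior is identical, the explicit form just makes the returned values visible at the return site. A toy equivalent:

package main

import "fmt"

// sum uses a named result; "return s" behaves exactly like a bare "return"
// here, but reads unambiguously.
func sum(xs []int) (s int) {
    for _, x := range xs {
        s += x
    }
    return s
}

func main() {
    fmt.Println(sum([]int{1, 2, 3})) // 6
}
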
@ -1,31 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil

import "path"

// CanonicalURLPath returns the canonical url path for p, which follows the rules:
// 1. the path always starts with "/"
// 2. replace multiple slashes with a single slash
// 3. replace each '.' '..' path name element with equivalent one
// 4. keep the trailing slash
// The function is borrowed from stdlib http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
    if p == "" {
        return "/"
    }
    if p[0] != '/' {
        p = "/" + p
    }
    np := path.Clean(p)
    // path.Clean removes trailing slash except for root,
    // put the trailing slash back if necessary.
    if p[len(p)-1] == '/' && np != "/" {
        np += "/"
    }
    return np
}

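Since the whole pathutil file is dropped from the vendor tree here, its behavior for reference, reproduced with just the standard library in a hypothetical standalone program:

package main

import (
    "fmt"
    "path"
)

func main() {
    p := "a//b/../c/"
    np := path.Clean("/" + p)
    // path.Clean drops the trailing slash except for root; restore it,
    // exactly as the removed CanonicalURLPath did.
    if p[len(p)-1] == '/' && np != "/" {
        np += "/"
    }
    fmt.Println(np) // /a/c/
}
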
@ -127,13 +127,13 @@ func copyMap(m map[string]int) (c map[string]int) {
    for k, v := range m {
        c[k] = v
    }
    return
    return c
}

func copyFloats(s []float64) (c []float64) {
    c = make([]float64, len(s))
    copy(c, s)
    return
    return c
}

func (r *report) String() (s string) {

@ -221,7 +221,7 @@ func percentiles(nums []float64) (data []float64) {
            j++
        }
    }
    return
    return data
}

func (r *report) sprintLatencies() string {

@ -118,20 +118,20 @@ func (sp *secondPoints) getTimeSeries() TimeSeries {
    return tslice
}

func (ts TimeSeries) String() string {
func (t TimeSeries) String() string {
    buf := new(bytes.Buffer)
    wr := csv.NewWriter(buf)
    if err := wr.Write([]string{"UNIX-SECOND", "MIN-LATENCY-MS", "AVG-LATENCY-MS", "MAX-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil {
        log.Fatal(err)
    }
    rows := [][]string{}
    for i := range ts {
    for i := range t {
        row := []string{
            fmt.Sprintf("%d", ts[i].Timestamp),
            ts[i].MinLatency.String(),
            ts[i].AvgLatency.String(),
            ts[i].MaxLatency.String(),
            fmt.Sprintf("%d", ts[i].ThroughPut),
            fmt.Sprintf("%d", t[i].Timestamp),
            t[i].MinLatency.String(),
            t[i].AvgLatency.String(),
            t[i].MaxLatency.String(),
            fmt.Sprintf("%d", t[i].ThroughPut),
        }
        rows = append(rows, row)
    }

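The ts to t receiver rename above is cosmetic. One thing the method depends on that is easy to miss when reusing the pattern: csv.Writer buffers, so Flush (or WriteAll, which flushes) must run before reading the bytes.Buffer. A minimal sketch with a hypothetical column subset:

package main

import (
    "bytes"
    "encoding/csv"
    "fmt"
    "log"
)

func main() {
    buf := new(bytes.Buffer)
    wr := csv.NewWriter(buf)
    if err := wr.WriteAll([][]string{
        {"UNIX-SECOND", "AVG-LATENCY-MS"}, // hypothetical columns
        {"1500000000", "1.2"},
    }); err != nil {
        log.Fatal(err)
    }
    // WriteAll flushes; with plain Write calls an explicit wr.Flush() is
    // required before buf.String() sees any bytes.
    fmt.Print(buf.String())
}
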
@ -1,140 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package srv looks up DNS SRV records.
package srv

import (
    "fmt"
    "net"
    "net/url"
    "strings"

    "github.com/coreos/etcd/pkg/types"
)

var (
    // indirection for testing
    lookupSRV      = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
    resolveTCPAddr = net.ResolveTCPAddr
)

// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
    tempName := int(0)
    tcp2ap := make(map[string]url.URL)

    // First, resolve the apurls
    for _, url := range apurls {
        tcpAddr, err := resolveTCPAddr("tcp", url.Host)
        if err != nil {
            return nil, err
        }
        tcp2ap[tcpAddr.String()] = url
    }

    stringParts := []string{}
    updateNodeMap := func(service, scheme string) error {
        _, addrs, err := lookupSRV(service, "tcp", dns)
        if err != nil {
            return err
        }
        for _, srv := range addrs {
            port := fmt.Sprintf("%d", srv.Port)
            host := net.JoinHostPort(srv.Target, port)
            tcpAddr, terr := resolveTCPAddr("tcp", host)
            if terr != nil {
                err = terr
                continue
            }
            n := ""
            url, ok := tcp2ap[tcpAddr.String()]
            if ok {
                n = name
            }
            if n == "" {
                n = fmt.Sprintf("%d", tempName)
                tempName++
            }
            // SRV records have a trailing dot but URL shouldn't.
            shortHost := strings.TrimSuffix(srv.Target, ".")
            urlHost := net.JoinHostPort(shortHost, port)
            stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
            if ok && url.Scheme != scheme {
                err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
            }
        }
        if len(stringParts) == 0 {
            return err
        }
        return nil
    }

    failCount := 0
    err := updateNodeMap(service+"-ssl", "https")
    srvErr := make([]string, 2)
    if err != nil {
        srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
        failCount++
    }
    err = updateNodeMap(service, "http")
    if err != nil {
        srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
        failCount++
    }
    if failCount == 2 {
        return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
    }
    return stringParts, nil
}

type SRVClients struct {
    Endpoints []string
    SRVs      []*net.SRV
}

// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string) (*SRVClients, error) {
    var urls []*url.URL
    var srvs []*net.SRV

    updateURLs := func(service, scheme string) error {
        _, addrs, err := lookupSRV(service, "tcp", domain)
        if err != nil {
            return err
        }
        for _, srv := range addrs {
            urls = append(urls, &url.URL{
                Scheme: scheme,
                Host:   net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
            })
        }
        srvs = append(srvs, addrs...)
        return nil
    }

    errHTTPS := updateURLs(service+"-ssl", "https")
    errHTTP := updateURLs(service, "http")

    if errHTTPS != nil && errHTTP != nil {
        return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
    }

    endpoints := make([]string, len(urls))
    for i := range urls {
        endpoints[i] = urls[i].String()
    }
    return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
}

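This removes the vendored copy of etcd's DNS SRV discovery package, dropped from dbtester's vendored dependency set in this upgrade. Its core is net.LookupSRV over _<service>._tcp.<domain>; a minimal standalone query, with example.com and the service name as placeholders rather than values from the diff:

package main

import (
    "fmt"
    "net"
)

func main() {
    // Queries _etcd-server._tcp.example.com.
    _, addrs, err := net.LookupSRV("etcd-server", "tcp", "example.com")
    if err != nil {
        fmt.Println("lookup failed:", err)
        return
    }
    for _, srv := range addrs {
        fmt.Printf("%s:%d\n", srv.Target, srv.Port)
    }
}
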
@ -61,7 +61,7 @@ func (us *unsafeSet) Remove(value string) {
// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
    _, exists = us.d[value]
    return
    return exists
}

// ContainsAll returns whether the set contains all given values

@ -94,7 +94,7 @@ func (us *unsafeSet) Values() (values []string) {
    for val := range us.d {
        values = append(values, val)
    }
    return
    return values
}

// Copy creates a new Set containing the values of the first

@ -1,56 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package version implements etcd version parsing and contains latest version
// information.
package version

import (
    "fmt"
    "strings"

    "github.com/coreos/go-semver/semver"
)

var (
    // MinClusterVersion is the min cluster version this etcd binary is compatible with.
    MinClusterVersion = "3.0.0"
    Version           = "3.2.0-rc.1+git"
    APIVersion        = "unknown"

    // Git SHA Value will be set during build
    GitSHA = "Not provided (use ./build instead of go build)"
)

func init() {
    ver, err := semver.NewVersion(Version)
    if err == nil {
        APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
    }
}

type Versions struct {
    Server  string `json:"etcdserver"`
    Cluster string `json:"etcdcluster"`
    // TODO: raft state machine version
}

// Cluster only keeps the major.minor.
func Cluster(v string) string {
    vs := strings.Split(v, ".")
    if len(vs) <= 2 {
        return v
    }
    return fmt.Sprintf("%s.%s", vs[0], vs[1])
}

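The removed version package is small, but its Cluster helper is worth noting: it trims a full version string to major.minor for cluster-version comparison. Equivalent behavior, reimplemented here purely for illustration:

package main

import (
    "fmt"
    "strings"
)

// cluster mirrors the removed version.Cluster.
func cluster(v string) string {
    vs := strings.Split(v, ".")
    if len(vs) <= 2 {
        return v
    }
    return fmt.Sprintf("%s.%s", vs[0], vs[1])
}

func main() {
    fmt.Println(cluster("3.2.0-rc.1+git")) // 3.2
}
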
@ -1,202 +0,0 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

@ -1,20 +0,0 @@
package main

import (
    "fmt"
    "github.com/coreos/go-semver/semver"
    "os"
)

func main() {
    vA, err := semver.NewVersion(os.Args[1])
    if err != nil {
        fmt.Println(err.Error())
    }
    vB, err := semver.NewVersion(os.Args[2])
    if err != nil {
        fmt.Println(err.Error())
    }

    fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
}

@ -1,268 +0,0 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Semantic Versions http://semver.org
package semver

import (
    "bytes"
    "errors"
    "fmt"
    "strconv"
    "strings"
)

type Version struct {
    Major      int64
    Minor      int64
    Patch      int64
    PreRelease PreRelease
    Metadata   string
}

type PreRelease string

func splitOff(input *string, delim string) (val string) {
    parts := strings.SplitN(*input, delim, 2)

    if len(parts) == 2 {
        *input = parts[0]
        val = parts[1]
    }

    return val
}

func New(version string) *Version {
    return Must(NewVersion(version))
}

func NewVersion(version string) (*Version, error) {
    v := Version{}

    if err := v.Set(version); err != nil {
        return nil, err
    }

    return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
    if err != nil {
        panic(err)
    }
    return v
}

// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
    metadata := splitOff(&version, "+")
    preRelease := PreRelease(splitOff(&version, "-"))
    dotParts := strings.SplitN(version, ".", 3)

    if len(dotParts) != 3 {
        return fmt.Errorf("%s is not in dotted-tri format", version)
    }

    parsed := make([]int64, 3, 3)

    for i, v := range dotParts[:3] {
        val, err := strconv.ParseInt(v, 10, 64)
        parsed[i] = val
        if err != nil {
            return err
        }
    }

    v.Metadata = metadata
    v.PreRelease = preRelease
    v.Major = parsed[0]
    v.Minor = parsed[1]
    v.Patch = parsed[2]
    return nil
}

func (v Version) String() string {
    var buffer bytes.Buffer

    fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

    if v.PreRelease != "" {
        fmt.Fprintf(&buffer, "-%s", v.PreRelease)
    }

    if v.Metadata != "" {
        fmt.Fprintf(&buffer, "+%s", v.Metadata)
    }

    return buffer.String()
}

func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
    var data string
    if err := unmarshal(&data); err != nil {
        return err
    }
    return v.Set(data)
}

func (v Version) MarshalJSON() ([]byte, error) {
    return []byte(`"` + v.String() + `"`), nil
}

func (v *Version) UnmarshalJSON(data []byte) error {
    l := len(data)
    if l == 0 || string(data) == `""` {
        return nil
    }
    if l < 2 || data[0] != '"' || data[l-1] != '"' {
        return errors.New("invalid semver string")
    }
    return v.Set(string(data[1 : l-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
    if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
        return cmp
    }
    return preReleaseCompare(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
    return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
    return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
    return []int64{v.Major, v.Minor, v.Patch}
}

func (p PreRelease) Slice() []string {
    preRelease := string(p)
    return strings.Split(preRelease, ".")
}

func preReleaseCompare(versionA Version, versionB Version) int {
    a := versionA.PreRelease
    b := versionB.PreRelease

    /* Handle the case where if two versions are otherwise equal it is the
     * one without a PreRelease that is greater */
    if len(a) == 0 && (len(b) > 0) {
        return 1
    } else if len(b) == 0 && (len(a) > 0) {
        return -1
    }

    // If there is a prerelease, check and compare each part.
    return recursivePreReleaseCompare(a.Slice(), b.Slice())
}

func recursiveCompare(versionA []int64, versionB []int64) int {
    if len(versionA) == 0 {
        return 0
    }

    a := versionA[0]
    b := versionB[0]

    if a > b {
        return 1
    } else if a < b {
        return -1
    }

    return recursiveCompare(versionA[1:], versionB[1:])
}

func recursivePreReleaseCompare(versionA []string, versionB []string) int {
    // A larger set of pre-release fields has a higher precedence than a smaller set,
    // if all of the preceding identifiers are equal.
    if len(versionA) == 0 {
        if len(versionB) > 0 {
            return -1
        }
        return 0
    } else if len(versionB) == 0 {
        // We're longer than versionB so return 1.
        return 1
    }

    a := versionA[0]
    b := versionB[0]

    aInt := false
    bInt := false

    aI, err := strconv.Atoi(versionA[0])
    if err == nil {
        aInt = true
    }

    bI, err := strconv.Atoi(versionB[0])
    if err == nil {
        bInt = true
    }

    // Handle Integer Comparison
    if aInt && bInt {
        if aI > bI {
            return 1
        } else if aI < bI {
            return -1
        }
    }

    // Handle String Comparison
    if a > b {
        return 1
    } else if a < b {
        return -1
    }

    return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}

// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
    v.Major += 1
    v.Minor = 0
    v.Patch = 0
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
    v.Minor += 1
    v.Patch = 0
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
    v.Patch += 1
    v.PreRelease = PreRelease("")
    v.Metadata = ""
}

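With the go-semver sources shown above, precedence follows semver.org rules: numeric pre-release identifiers compare numerically, and a version with a pre-release sorts before its plain release. A usage sketch against the library API visible in the removed file:

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    a := semver.New("3.2.0-rc.1") // Must-style constructor from the file above
    b := semver.New("3.2.0")
    fmt.Println(a.LessThan(*b)) // true: a pre-release precedes its release
    fmt.Println(a.Compare(*b))  // -1
}
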
@ -95,6 +95,11 @@ func (l *LogLevel) Set(s string) error {
    return nil
}

// Returns an empty string, only here to fulfill the pflag.Value interface.
func (l *LogLevel) Type() string {
    return ""
}

// ParseLevel translates some potential loglevel strings into their corresponding levels.
func ParseLevel(s string) (LogLevel, error) {
    switch s {

@ -37,6 +37,14 @@ func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...inte
    }
}

// SetLevel allows users to change the current logging level.
func (p *PackageLogger) SetLevel(l LogLevel) {
    logger.Lock()
    defer logger.Unlock()
    p.level = l
}

// LevelAt checks if the given log level will be outputted under current setting.
func (p *PackageLogger) LevelAt(l LogLevel) bool {
    logger.Lock()
    defer logger.Unlock()

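SetLevel and LevelAt are the additions in this hunk. A hedged usage sketch (the repo and package strings are placeholders, not from the diff); the LevelAt guard is the point, since it lets callers skip building expensive log arguments when the level is off:

package main

import "github.com/coreos/pkg/capnslog"

var plog = capnslog.NewPackageLogger("github.com/example/app", "main")

func main() {
    plog.SetLevel(capnslog.DEBUG)
    if plog.LevelAt(capnslog.DEBUG) {
        plog.Debugf("debug logging is on")
    }
}
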
@ -81,6 +89,12 @@ func (p *PackageLogger) Panic(args ...interface{}) {
    panic(s)
}

func (p *PackageLogger) Panicln(args ...interface{}) {
    s := fmt.Sprintln(args...)
    p.internalLog(calldepth, CRITICAL, s)
    panic(s)
}

func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
    p.Logf(CRITICAL, format, args...)
    os.Exit(1)

@ -113,7 +113,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string {
//
// See also: ParseBigBytes.
//
// BigBytes(82854982) -> 83MB
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
    sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
    return humanateBigBytes(s, bigSIExp, sizes)

@ -123,7 +123,7 @@ func BigBytes(s *big.Int) string {
//
// See also: ParseBigBytes.
//
// BigIBytes(82854982) -> 79MiB
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
    sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
    return humanateBigBytes(s, bigIECExp, sizes)

@ -134,8 +134,8 @@ func BigIBytes(s *big.Int) string {
//
// See also: BigBytes, BigIBytes.
//
// ParseBigBytes("42MB") -> 42000000, nil
// ParseBigBytes("42mib") -> 44040192, nil
// ParseBigBytes("42 MB") -> 42000000, nil
// ParseBigBytes("42 mib") -> 44040192, nil
func ParseBigBytes(s string) (*big.Int, error) {
    lastDigit := 0
    hasComma := false

@ -84,7 +84,7 @@ func humanateBytes(s uint64, base float64, sizes []string) string {
//
// See also: ParseBytes.
//
// Bytes(82854982) -> 83MB
// Bytes(82854982) -> 83 MB
func Bytes(s uint64) string {
    sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
    return humanateBytes(s, 1000, sizes)

@ -94,7 +94,7 @@ func Bytes(s uint64) string {
//
// See also: ParseBytes.
//
// IBytes(82854982) -> 79MiB
// IBytes(82854982) -> 79 MiB
func IBytes(s uint64) string {
    sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
    return humanateBytes(s, 1024, sizes)

@ -105,8 +105,8 @@ func IBytes(s uint64) string {
//
// See Also: Bytes, IBytes.
//
// ParseBytes("42MB") -> 42000000, nil
// ParseBytes("42mib") -> 44040192, nil
// ParseBytes("42 MB") -> 42000000, nil
// ParseBytes("42 mib") -> 44040192, nil
func ParseBytes(s string) (uint64, error) {
    lastDigit := 0
    hasComma := false

@ -2,6 +2,7 @@ package humanize

import (
    "bytes"
    "math"
    "math/big"
    "strconv"
    "strings"

@ -13,6 +14,12 @@ import (
// e.g. Comma(834142) -> 834,142
func Comma(v int64) string {
    sign := ""

    // Min int64 can't be negated to a usable value, so it has to be special cased.
    if v == math.MinInt64 {
        return "-9,223,372,036,854,775,808"
    }

    if v < 0 {
        sign = "-"
        v = 0 - v

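The special case above exists because negating math.MinInt64 overflows back to itself in two's complement, so the usual v = 0 - v sign normalization would silently fail for that one value. Demonstrably:

package main

import (
    "fmt"
    "math"
)

func main() {
    v := int64(math.MinInt64)
    fmt.Println(-v == v) // true: negation overflows back to MinInt64
}
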
@ -2,7 +2,7 @@
Package humanize converts boring ugly numbers to human-friendly strings and back.

Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like, "83MB" or
"79MiB" (whichever you prefer).
representing sizes like 82854982 into useful strings like, "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize

@ -160,7 +160,7 @@ func FormatFloat(format string, n float64) string {
    intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])

    // generate integer part string
    intStr := strconv.Itoa(int(intf))
    intStr := strconv.FormatInt(int64(intf), 10)

    // add thousand separator if required
    if len(thousandStr) > 0 {

@ -68,7 +68,7 @@ func ComputeSI(input float64) (float64, string) {
    value := mag / math.Pow(10, exponent)

    // Handle special case where value is exactly 1000.0
    // Should return 1M instead of 1000k
    // Should return 1 M instead of 1000 k
    if value == 1000.0 {
        exponent += 3
        value = mag / math.Pow(10, exponent)

@ -86,8 +86,8 @@ func ComputeSI(input float64) (float64, string) {
//
// See also: ComputeSI, ParseSI.
//
// e.g. SI(1000000, B) -> 1MB
// e.g. SI(2.2345e-12, "F") -> 2.2345pF
// e.g. SI(1000000, "B") -> 1 MB
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
func SI(input float64, unit string) string {
    value, prefix := ComputeSI(input)
    return Ftoa(value) + " " + prefix + unit

@ -99,7 +99,7 @@ var errInvalid = errors.New("invalid input")
//
// See also: SI, ComputeSI.
//
// e.g. ParseSI(2.2345pF) -> (2.2345e-12, "F", nil)
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
func ParseSI(input string) (float64, string, error) {
    found := riParseRegex.FindStringSubmatch(input)
    if len(found) != 4 {

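The go-humanize changes in this upgrade are mostly the new space between number and unit, as the doc-comment diffs above show. Expected output with this vendored version, per my reading of the new formatting:

package main

import (
    "fmt"

    "github.com/dustin/go-humanize"
)

func main() {
    fmt.Println(humanize.Bytes(82854982))     // "83 MB"
    fmt.Println(humanize.SI(2.2345e-12, "F")) // "2.2345 pF"
}
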
Some files were not shown because too many files have changed in this diff.