mirror of https://github.com/etcd-io/dbtester.git

*: vendor update, new config (#29)

This commit is contained in:
parent 7d46587b69
commit 0dac8107b0
@ -29,48 +29,48 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/auth/authpb",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/client",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/clientv3",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/tlsutil",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/pkg/types",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/coreos/etcd/storage/storagepb",
|
||||
"Comment": "v2.3.0-364-ga8b7d0b",
|
||||
"Rev": "a8b7d0b63c1bcaebc497e479ec258fd499766a4b"
|
||||
"Comment": "v2.3.0-423-gcfbb8a7",
|
||||
"Rev": "cfbb8a71db5b5c38664a5d6406e15bd296b97808"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/dustin/go-humanize",
|
||||
|
|
@ -117,7 +117,7 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "dda510ac0fd43b39770f22ac6260eb91d377bce3"
|
||||
"Rev": "f0a097ddac24fb00e07d2ac17f8671423f3ea47c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gonum/floats",
|
||||
|
|
@ -267,47 +267,47 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/trace",
|
||||
"Rev": "e45385e9b226f570b1f086bf287b25d3d4117776"
|
||||
"Rev": "fb93926129b8ec0056f2f458b1f519654814edf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2",
|
||||
"Rev": "33fa30fe45020622640e947917fd1fc4c81e3dce"
|
||||
"Rev": "b0e2337fe6ec0c637fa4f123268b972f334504eb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/google",
|
||||
"Rev": "33fa30fe45020622640e947917fd1fc4c81e3dce"
|
||||
"Rev": "b0e2337fe6ec0c637fa4f123268b972f334504eb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/internal",
|
||||
"Rev": "33fa30fe45020622640e947917fd1fc4c81e3dce"
|
||||
"Rev": "b0e2337fe6ec0c637fa4f123268b972f334504eb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jws",
|
||||
"Rev": "33fa30fe45020622640e947917fd1fc4c81e3dce"
|
||||
"Rev": "b0e2337fe6ec0c637fa4f123268b972f334504eb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jwt",
|
||||
"Rev": "33fa30fe45020622640e947917fd1fc4c81e3dce"
|
||||
"Rev": "b0e2337fe6ec0c637fa4f123268b972f334504eb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
|
|
@ -363,67 +363,67 @@
|
|||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/compute/metadata",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/internal",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/internal/opts",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/internal/transport",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/storage",
|
||||
"Rev": "b1594faf9accbaecc2d63eaa1f5c5cdf7a5ea884"
|
||||
"Rev": "7b5d2677dff6d957e7f87c784b54cfaa03150c68"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/codes",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials/oauth",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/internal",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/metadata",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/naming",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/peer",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/transport",
|
||||
"Rev": "7834b974e55fbf85a5b01afb5821391c71084efd"
|
||||
"Rev": "9ac074585f926c8506b6351bfdc396d2b19b1cb1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
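A note on the Godeps/Godeps.json entries above: each dependency is pinned to an exact git revision (Rev), with Comment recording the nearest tag at that revision; this commit moves the etcd packages from v2.3.0-364-ga8b7d0b to v2.3.0-423-gcfbb8a7 and bumps protobuf, golang.org/x/net, oauth2, google.golang.org/cloud, and grpc. A minimal sketch of reading that manifest (the struct and the program itself are illustrative, not part of this commit):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Dependency mirrors one entry of Godeps/Godeps.json as shown in the diff above.
type Dependency struct {
	ImportPath string `json:"ImportPath"`
	Comment    string `json:"Comment,omitempty"` // nearest tag, e.g. "v2.3.0-423-gcfbb8a7"
	Rev        string `json:"Rev"`               // pinned git commit SHA
}

// Godeps declares only the fields used here.
type Godeps struct {
	ImportPath string       `json:"ImportPath"`
	Deps       []Dependency `json:"Deps"`
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var g Godeps
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		panic(err)
	}
	for _, d := range g.Deps {
		fmt.Println(d.ImportPath, d.Rev)
	}
}
```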
||||
|
|
|
|||
|
|
@ -0,0 +1,9 @@

We keep all the raw logs here and in cloud storage (if they are too big).

- If you want to look at the full logs, please let us know.
- If you need help running this testing suite, please let us know.
- If you think the test results are wrong, please file an issue.

Thanks!
@ -0,0 +1,193 @@

titles:
- Write 500K keys, 1 client, key 32 bytes, value 500 bytes
- Write 2M keys, 1000 clients (etcd 100 conns), key 32 bytes, value 500 bytes

step1:
- data_path_list:
  - bench-2016041401/bench-01-etcdv3-1-monitor.csv
  - bench-2016041401/bench-01-etcdv3-2-monitor.csv
  - bench-2016041401/bench-01-etcdv3-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-01-etcdv3-timeseries.csv
  output_path: bench-2016041401/bench-01-etcdv3-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-01-zk-snapshot1-1-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot1-2-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot1-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-01-zk-snapshot1-timeseries.csv
  output_path: bench-2016041401/bench-01-zk-snapshot1-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-01-zk-snapshot2-1-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot2-2-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot2-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-01-zk-snapshot2-timeseries.csv
  output_path: bench-2016041401/bench-01-zk-snapshot2-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-01-zk-snapshot3-1-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot3-2-monitor.csv
  - bench-2016041401/bench-01-zk-snapshot3-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-01-zk-snapshot3-timeseries.csv
  output_path: bench-2016041401/bench-01-zk-snapshot3-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-02-etcdv3-1-monitor.csv
  - bench-2016041401/bench-02-etcdv3-2-monitor.csv
  - bench-2016041401/bench-02-etcdv3-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-02-etcdv3-timeseries.csv
  output_path: bench-2016041401/bench-02-etcdv3-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-02-zk-snapshot1-1-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot1-2-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot1-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-02-zk-snapshot1-timeseries.csv
  output_path: bench-2016041401/bench-02-zk-snapshot1-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-02-zk-snapshot2-1-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot2-2-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot2-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-02-zk-snapshot2-timeseries.csv
  output_path: bench-2016041401/bench-02-zk-snapshot2-aggregated.csv

- data_path_list:
  - bench-2016041401/bench-02-zk-snapshot3-1-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot3-2-monitor.csv
  - bench-2016041401/bench-02-zk-snapshot3-3-monitor.csv
  data_benchmark_path: bench-2016041401/bench-02-zk-snapshot3-timeseries.csv
  output_path: bench-2016041401/bench-02-zk-snapshot3-aggregated.csv

step2:
- data_list:
  - path: bench-2016041401/bench-01-etcdv3-aggregated.csv
    name: etcd_v3
  - path: bench-2016041401/bench-01-zk-snapshot1-aggregated.csv
    name: zookeeper_v3.4.8_snapshot1
  - path: bench-2016041401/bench-01-zk-snapshot2-aggregated.csv
    name: zookeeper_v3.4.8_snapshot2
  - path: bench-2016041401/bench-01-zk-snapshot3-aggregated.csv
    name: zookeeper_v3.4.8_snapshot3
  output_path: bench-2016041401/bench-01-all-aggregated.csv

- data_list:
  - path: bench-2016041401/bench-02-etcdv3-aggregated.csv
    name: etcd_v3
  - path: bench-2016041401/bench-02-zk-snapshot1-aggregated.csv
    name: zookeeper_v3.4.8_snapshot1
  - path: bench-2016041401/bench-02-zk-snapshot2-aggregated.csv
    name: zookeeper_v3.4.8_snapshot2
  - path: bench-2016041401/bench-02-zk-snapshot3-aggregated.csv
    name: zookeeper_v3.4.8_snapshot3
  output_path: bench-2016041401/bench-02-all-aggregated.csv

step3:
- data_path: bench-2016041401/bench-01-all-aggregated.csv

  plot_list:
  - lines:
    - column: avg_latency_ms_etcd_v3
      legend: etcd v3
    - column: avg_latency_ms_zookeeper_v3.4.8_snapshot1
      legend: Zookeeper v3.4.8 (snapCount 10000)
    - column: avg_latency_ms_zookeeper_v3.4.8_snapshot2
      legend: Zookeeper v3.4.8 (snapCount 100000)
    - column: avg_latency_ms_zookeeper_v3.4.8_snapshot3
      legend: Zookeeper v3.4.8 (snapCount 1000000)
    x_axis: Second
    y_axis: Latency(millisecond)
    output_path_list:
    - bench-2016041401/bench-01-avg-latency-ms.svg
    - bench-2016041401/bench-01-avg-latency-ms.png

  - lines:
    - column: throughput_etcd_v3
      legend: etcd v3
    - column: throughput_zookeeper_v3.4.8_snapshot1
      legend: Zookeeper v3.4.8 (snapCount 10000)
    - column: throughput_zookeeper_v3.4.8_snapshot2
      legend: Zookeeper v3.4.8 (snapCount 100000)
    - column: throughput_zookeeper_v3.4.8_snapshot3
      legend: Zookeeper v3.4.8 (snapCount 1000000)
    x_axis: Second
    y_axis: Throughput
    output_path_list:
    - bench-2016041401/bench-01-throughput.svg
    - bench-2016041401/bench-01-throughput.png

  - lines:
    - column: avg_cpu_etcd_v3
      legend: etcd v3
    - column: avg_cpu_zookeeper_v3.4.8_snapshot1
      legend: Zookeeper v3.4.8 (snapCount 10000)
    - column: avg_cpu_zookeeper_v3.4.8_snapshot2
      legend: Zookeeper v3.4.8 (snapCount 100000)
    - column: avg_cpu_zookeeper_v3.4.8_snapshot3
      legend: Zookeeper v3.4.8 (snapCount 1000000)
    x_axis: Second
    y_axis: CPU
    output_path_list:
    - bench-2016041401/bench-01-avg-cpu.svg
    - bench-2016041401/bench-01-avg-cpu.png

  - lines:
    - column: avg_memory_mb_etcd_v3
      legend: etcd v3
    - column: avg_memory_mb_zookeeper_v3.4.8_snapshot1
      legend: Zookeeper v3.4.8 (snapCount 10000)
    - column: avg_memory_mb_zookeeper_v3.4.8_snapshot2
      legend: Zookeeper v3.4.8 (snapCount 100000)
    - column: avg_memory_mb_zookeeper_v3.4.8_snapshot3
      legend: Zookeeper v3.4.8 (snapCount 1000000)
    x_axis: Second
    y_axis: Memory(MB)
    output_path_list:
    - bench-2016041401/bench-01-avg-memory.svg
    - bench-2016041401/bench-01-avg-memory.png

step4:
  preface: |
    - Google Cloud Compute Engine
    - 8 vCPUs + 16GB Memory + 50GB SSD
    - 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD
    - Ubuntu 15.10
    - Go 1.6
    - Java 8
    - Java(TM) SE Runtime Environment (build 1.8.0_74-b02)
    - Java HotSpot(TM) 64-Bit Server VM (build 25.74-b02, mixed mode)
    - etcd v3 (master branch)
    - Zookeeper v3.4.8

  results:
  - images:
    - image_title: bench-2016041401/bench-01-avg-latency-ms
      image_path: https://storage.googleapis.com/bench-2016041401/bench-01-avg-latency-ms.svg
      image_type: remote
    - image_title: bench-2016041401/bench-01-throughput
      image_path: https://storage.googleapis.com/bench-2016041401/bench-01-throughput.svg
      image_type: remote
    - image_title: bench-2016041401/bench-01-avg-cpu
      image_path: https://storage.googleapis.com/bench-2016041401/bench-01-avg-cpu.svg
      image_type: remote
    - image_title: bench-2016041401/bench-01-avg-memory
      image_path: https://storage.googleapis.com/bench-2016041401/bench-01-avg-memory.svg
      image_type: remote

  - images:
    - image_title: bench-2016041401/bench-02-avg-latency-ms
      image_path: https://storage.googleapis.com/bench-2016041401/bench-02-avg-latency-ms.svg
      image_type: remote
    - image_title: bench-2016041401/bench-02-throughput
      image_path: https://storage.googleapis.com/bench-2016041401/bench-02-throughput.svg
      image_type: remote
    - image_title: bench-2016041401/bench-02-avg-cpu
      image_path: https://storage.googleapis.com/bench-2016041401/bench-02-avg-cpu.svg
      image_type: remote
    - image_title: bench-2016041401/bench-02-avg-memory
      image_path: https://storage.googleapis.com/bench-2016041401/bench-02-avg-memory.svg
      image_type: remote

  output_path: bench-2016041401/README.md
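The configuration above drives a four-stage analysis pipeline: step1 merges each cluster's three per-server monitor CSVs with the benchmark time series into one aggregated CSV, step2 joins the per-database aggregates into a single comparison CSV, step3 plots latency, throughput, CPU, and memory over time, and step4 writes a README with the test preface and the uploaded images. A rough sketch of Go types such a file could be decoded into with the vendored gopkg.in/yaml.v2 (type and field names are illustrative assumptions, not the actual dbtester types; step4 is omitted for brevity):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

// Config mirrors the layout of the analysis configuration shown above.
type Config struct {
	Titles []string `yaml:"titles"`

	Step1 []struct {
		DataPathList      []string `yaml:"data_path_list"`
		DataBenchmarkPath string   `yaml:"data_benchmark_path"`
		OutputPath        string   `yaml:"output_path"`
	} `yaml:"step1"`

	Step2 []struct {
		DataList []struct {
			Path string `yaml:"path"`
			Name string `yaml:"name"`
		} `yaml:"data_list"`
		OutputPath string `yaml:"output_path"`
	} `yaml:"step2"`

	Step3 []struct {
		DataPath string `yaml:"data_path"`
		PlotList []struct {
			Lines []struct {
				Column string `yaml:"column"`
				Legend string `yaml:"legend"`
			} `yaml:"lines"`
			XAxis          string   `yaml:"x_axis"`
			YAxis          string   `yaml:"y_axis"`
			OutputPathList []string `yaml:"output_path_list"`
		} `yaml:"plot_list"`
	} `yaml:"step3"`
}

func main() {
	b, err := ioutil.ReadFile("analyze.yaml") // path is an assumption
	if err != nil {
		panic(err)
	}
	var cfg Config
	if err := yaml.Unmarshal(b, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("step1 jobs: %d, step2 merges: %d\n", len(cfg.Step1), len(cfg.Step2))
}
```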
@ -0,0 +1,41 @@

database: etcdv3
test_name: bench-01-etcdv3

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2379

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 100000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-01-etcdv3-timeseries.csv
  connections: 1
  clients: 1
  key_size: 32
  value_size: 500
  total_requests: 500000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-01-zk-snapshot1

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 10000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-01-zk-snapshot1-timeseries.csv
  connections: 1
  clients: 1
  key_size: 32
  value_size: 500
  total_requests: 500000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-01-zk-snapshot2

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 100000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-01-zk-snapshot2-timeseries.csv
  connections: 1
  clients: 1
  key_size: 32
  value_size: 500
  total_requests: 500000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-01-zk-snapshot3

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 1000000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-01-zk-snapshot3-timeseries.csv
  connections: 1
  clients: 1
  key_size: 32
  value_size: 500
  total_requests: 500000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: etcdv3
test_name: bench-02-etcdv3

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2379

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 100000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-02-etcdv3-timeseries.csv
  connections: 100
  clients: 1000
  key_size: 32
  value_size: 500
  total_requests: 2000000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-02-zk-snapshot1

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 10000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-02-zk-snapshot1-timeseries.csv
  connections: 1000
  clients: 1000
  key_size: 32
  value_size: 500
  total_requests: 2000000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-02-zk-snapshot2

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 100000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-02-zk-snapshot2-timeseries.csv
  connections: 1000
  clients: 1000
  key_size: 32
  value_size: 500
  total_requests: 2000000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false

@ -0,0 +1,41 @@

database: zk
test_name: bench-02-zk-snapshot3

google_cloud_project_name: etcd-development
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
google_cloud_storage_bucket_name: bench-2016041401

peer_ips:
- 10.240.0.7
- 10.240.0.13
- 10.240.0.14

agent_port: 3500
database_port: 2181

# start database by sending RPC calls to agents
step1:
  skip: false
  database_log_path: database.log
  monitor_log_path: monitor.csv

  zookeeper_max_client_connections: 5000
  zookeeper_snap_count: 1000000

# start benchmark
step2:
  skip: false
  bench_type: write
  local_read: true
  result_path: bench-02-zk-snapshot3-timeseries.csv
  connections: 1000
  clients: 1000
  key_size: 32
  value_size: 500
  total_requests: 2000000
  etcdv3_compaction_cycle: 0

# after benchmark
step3:
  skip: false
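The eight agent/benchmark configs above form the test matrix: bench-01 drives 500,000 writes from a single client over one connection, while bench-02 drives 2,000,000 writes from 1,000 concurrent clients (etcd multiplexed over 100 gRPC connections, ZooKeeper over 1,000), each write carrying a 32-byte key and a 500-byte value, against etcd v3 and ZooKeeper 3.4.8 at snapCounts of 10000, 100000, and 1000000. A small back-of-the-envelope check on the heavier run (plain arithmetic on the values above):

```go
package main

import "fmt"

func main() {
	const (
		keySize       = 32      // bytes, from key_size
		valueSize     = 500     // bytes, from value_size
		totalRequests = 2000000 // from total_requests in the bench-02 configs
		clients       = 1000    // from clients in the bench-02 configs
	)
	perClient := totalRequests / clients
	payloadGiB := float64(totalRequests*(keySize+valueSize)) / (1 << 30)
	fmt.Printf("requests per client: %d\n", perClient) // 2000
	fmt.Printf("total payload: %.2f GiB\n", payloadGiB) // ~0.99 GiB, before replication and storage overhead
}
```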
@ -19,9 +19,9 @@ import (
|
|||
"fmt"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
import math "math"
|
||||
math "math"
|
||||
)
|
||||
|
||||
import io "io"
|
||||
|
||||
|
|
|
|||
|
|
@ -36,6 +36,12 @@ type User struct {
|
|||
Revoke []string `json:"revoke,omitempty"`
|
||||
}
|
||||
|
||||
// userListEntry is the user representation given by the server for ListUsers
|
||||
type userListEntry struct {
|
||||
User string `json:"user"`
|
||||
Roles []Role `json:"roles"`
|
||||
}
|
||||
|
||||
type UserRoles struct {
|
||||
User string `json:"user"`
|
||||
Roles []Role `json:"roles"`
|
||||
|
|
@ -194,7 +200,7 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
|
|||
}
|
||||
|
||||
var userList struct {
|
||||
Users []User `json:"users"`
|
||||
Users []userListEntry `json:"users"`
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(body, &userList); err != nil {
|
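The new userListEntry type above appears to exist because the server's ListUsers response describes each user as an object whose roles are full role objects, which the flat User struct used previously did not match; ListUsers itself still returns only the user names. A self-contained sketch of the shape being decoded (trimmed-down copies of the client types; the JSON payload is made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Role and userListEntry are reduced versions of the types touched in this diff.
type Role struct {
	Role string `json:"role"`
}

type userListEntry struct {
	User  string `json:"user"`
	Roles []Role `json:"roles"`
}

func main() {
	// Illustrative payload in the shape the updated ListUsers expects.
	body := []byte(`{"users":[{"user":"root","roles":[{"role":"root"}]},{"user":"alice","roles":[]}]}`)

	var userList struct {
		Users []userListEntry `json:"users"`
	}
	if err := json.Unmarshal(body, &userList); err != nil {
		panic(err)
	}

	// ListUsers returns only the names.
	var names []string
	for _, u := range userList.Users {
		names = append(names, u.User)
	}
	fmt.Println(names) // [root alice]
}
```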
||||
|
|
|
|||
|
|
@ -34,9 +34,6 @@ type Cluster interface {
|
|||
// MemberList lists the current cluster membership.
|
||||
MemberList(ctx context.Context) (*MemberListResponse, error)
|
||||
|
||||
// MemberLeader returns the current leader member.
|
||||
MemberLeader(ctx context.Context) (*Member, error)
|
||||
|
||||
// MemberAdd adds a new member into the cluster.
|
||||
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
|
||||
|
||||
|
|
@ -135,19 +132,6 @@ func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *cluster) MemberLeader(ctx context.Context) (*Member, error) {
|
||||
resp, err := c.MemberList(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, m := range resp.Members {
|
||||
if m.IsLeader {
|
||||
return (*Member)(m), nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *cluster) getRemote() pb.ClusterClient {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ type Maintenance interface {
|
|||
// times with different endpoints.
|
||||
Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
|
||||
|
||||
// Status gets the status of the member.
|
||||
// Status gets the status of the endpoint.
|
||||
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
|
||||
|
||||
// Snapshot provides a reader for a snapshot of a backend.
|
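With MemberLeader removed from the Cluster interface and IsLeader dropped from the Member message (see the etcdserverpb changes below), callers need another way to find the leader; the per-endpoint Status call documented above is the natural candidate. The sketch below is a standalone model of that approach and assumes the status response exposes the leader's member ID, which this diff does not itself show:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/net/context"
)

// statusResponse is a stand-in for the real response type; the Leader field
// (the reporting member's current leader ID) is an assumption.
type statusResponse struct {
	Leader uint64
}

// maintenance is the narrow slice of the Maintenance interface used here.
type maintenance interface {
	Status(ctx context.Context, endpoint string) (*statusResponse, error)
}

// leaderID asks endpoints for their status until one answers, then returns
// the leader ID that endpoint reports.
func leaderID(ctx context.Context, m maintenance, endpoints []string) (uint64, error) {
	for _, ep := range endpoints {
		resp, err := m.Status(ctx, ep)
		if err != nil {
			continue // endpoint unreachable; try the next one
		}
		return resp.Leader, nil
	}
	return 0, errors.New("no endpoint answered a Status request")
}

// fakeMaintenance lets the sketch run without a cluster.
type fakeMaintenance struct{}

func (fakeMaintenance) Status(ctx context.Context, endpoint string) (*statusResponse, error) {
	return &statusResponse{Leader: 42}, nil
}

func main() {
	id, err := leaderID(context.Background(), fakeMaintenance{}, []string{"10.240.0.7:2379"})
	fmt.Println(id, err) // 42 <nil>
}
```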
||||
|
|
|
|||
|
|
@ -94,9 +94,9 @@ import (
|
|||
"fmt"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
import math "math"
|
||||
math "math"
|
||||
)
|
||||
|
||||
import io "io"
|
||||
|
||||
|
|
|
|||
|
|
@ -8,9 +8,9 @@ import (
|
|||
"fmt"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
import math "math"
|
||||
math "math"
|
||||
)
|
||||
|
||||
import io "io"
|
||||
|
||||
|
|
|
|||
|
|
@ -8,20 +8,21 @@ import (
|
|||
"fmt"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
math "math"
|
||||
|
||||
authpb "github.com/coreos/etcd/auth/authpb"
|
||||
|
||||
io "io"
|
||||
)
|
||||
|
||||
import math "math"
|
||||
|
||||
import storagepb "github.com/coreos/etcd/storage/storagepb"
|
||||
import authpb "github.com/coreos/etcd/auth/authpb"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
|
|
@ -1094,11 +1095,10 @@ type Member struct {
|
|||
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
|
||||
// If the member is not started, name will be an empty string.
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
IsLeader bool `protobuf:"varint,3,opt,name=IsLeader,proto3" json:"IsLeader,omitempty"`
|
||||
PeerURLs []string `protobuf:"bytes,4,rep,name=peerURLs" json:"peerURLs,omitempty"`
|
||||
PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"`
|
||||
// If the member is not started, client_URLs will be an zero length
|
||||
// string array.
|
||||
ClientURLs []string `protobuf:"bytes,5,rep,name=clientURLs" json:"clientURLs,omitempty"`
|
||||
ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Member) Reset() { *m = Member{} }
|
||||
|
|
@ -4138,19 +4138,9 @@ func (m *Member) MarshalTo(data []byte) (int, error) {
|
|||
i = encodeVarintRpc(data, i, uint64(len(m.Name)))
|
||||
i += copy(data[i:], m.Name)
|
||||
}
|
||||
if m.IsLeader {
|
||||
data[i] = 0x18
|
||||
i++
|
||||
if m.IsLeader {
|
||||
data[i] = 1
|
||||
} else {
|
||||
data[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
if len(m.PeerURLs) > 0 {
|
||||
for _, s := range m.PeerURLs {
|
||||
data[i] = 0x22
|
||||
data[i] = 0x1a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
|
|
@ -4165,7 +4155,7 @@ func (m *Member) MarshalTo(data []byte) (int, error) {
|
|||
}
|
||||
if len(m.ClientURLs) > 0 {
|
||||
for _, s := range m.ClientURLs {
|
||||
data[i] = 0x2a
|
||||
data[i] = 0x22
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
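The byte constants in the Member marshalling code above follow from the protobuf wire format: every field is prefixed by a key byte equal to (field_number << 3) | wire_type. Removing IsLeader (field 3, varint, key 0x18) renumbers peerURLs from field 4 to 3 and clientURLs from 5 to 4, which is exactly the 0x22 to 0x1a and 0x2a to 0x22 change in MarshalTo. A quick check of the arithmetic:

```go
package main

import "fmt"

// key returns the protobuf key byte for a field number and wire type
// (0 = varint, 2 = length-delimited).
func key(field, wire uint32) uint32 { return field<<3 | wire }

func main() {
	fmt.Printf("IsLeader   (old field 3, varint): 0x%x\n", key(3, 0)) // 0x18, removed
	fmt.Printf("peerURLs   (old field 4, bytes):  0x%x\n", key(4, 2)) // 0x22
	fmt.Printf("peerURLs   (new field 3, bytes):  0x%x\n", key(3, 2)) // 0x1a
	fmt.Printf("clientURLs (old field 5, bytes):  0x%x\n", key(5, 2)) // 0x2a
	fmt.Printf("clientURLs (new field 4, bytes):  0x%x\n", key(4, 2)) // 0x22
}
```

Because the field numbers are reused rather than reserved, old and new generated code disagree about the Member encoding, so client and server stubs have to be regenerated and deployed together.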
||||
|
|
@ -5916,9 +5906,6 @@ func (m *Member) Size() (n int) {
|
|||
if l > 0 {
|
||||
n += 1 + l + sovRpc(uint64(l))
|
||||
}
|
||||
if m.IsLeader {
|
||||
n += 2
|
||||
}
|
||||
if len(m.PeerURLs) > 0 {
|
||||
for _, s := range m.PeerURLs {
|
||||
l = len(s)
|
||||
|
|
@ -9759,26 +9746,6 @@ func (m *Member) Unmarshal(data []byte) error {
|
|||
m.Name = string(data[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field IsLeader", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRpc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.IsLeader = bool(v != 0)
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
|
||||
}
|
||||
|
|
@ -9807,7 +9774,7 @@ func (m *Member) Unmarshal(data []byte) error {
|
|||
}
|
||||
m.PeerURLs = append(m.PeerURLs, string(data[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
case 5:
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -290,10 +290,8 @@ message TxnResponse {
|
|||
repeated ResponseUnion responses = 3;
|
||||
}
|
||||
|
||||
// Compaction compacts the kv store upto the given revision (including).
|
||||
// It removes the old versions of a key. It keeps the newest version of
|
||||
// the key even if its latest modification revision is smaller than the given
|
||||
// revision.
|
||||
// Compaction compacts the kv store upto a given revision. All superseded keys
|
||||
// with a revision less than the compaction revision will be removed.
|
||||
message CompactionRequest {
|
||||
int64 revision = 1;
|
||||
// physical is set so the RPC will wait until the compaction is physically
|
||||
|
|
@ -417,11 +415,10 @@ message Member {
|
|||
uint64 ID = 1;
|
||||
// If the member is not started, name will be an empty string.
|
||||
string name = 2;
|
||||
bool IsLeader = 3;
|
||||
repeated string peerURLs = 4;
|
||||
repeated string peerURLs = 3;
|
||||
// If the member is not started, client_URLs will be an zero length
|
||||
// string array.
|
||||
repeated string clientURLs = 5;
|
||||
repeated string clientURLs = 4;
|
||||
}
|
||||
|
||||
message MemberAddRequest {
|
||||
|
|
@ -459,7 +456,6 @@ message MemberListResponse {
|
|||
}
|
||||
|
||||
message DefragmentRequest {
|
||||
|
||||
}
|
||||
|
||||
message DefragmentResponse {
|
||||
|
|
|
|||
|
|
@ -18,9 +18,9 @@ import (
|
|||
"fmt"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
import math "math"
|
||||
math "math"
|
||||
)
|
||||
|
||||
import io "io"
|
||||
|
||||
|
|
|
|||
|
|
@ -663,7 +663,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -43,7 +43,7 @@ type HeaderField struct {
|
|||
|
||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
||||
// That is, it reports whether it starts with a colon.
|
||||
// It is not otherwise guaranteed to be a valid psuedo header field,
|
||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
||||
// though.
|
||||
func (hf HeaderField) IsPseudo() bool {
|
||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
||||
|
|
|
|||
|
|
@ -322,7 +322,7 @@ func mustUint31(v int32) uint32 {
|
|||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC2616, section 4.4.
|
||||
// permits a body. See RFC 2616, section 4.4.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@
|
|||
import "google.golang.org/cloud"
|
||||
```
|
||||
|
||||
**NOTE:** These packages are experimental, and may occasionally make
|
||||
**NOTE:** These packages are under development, and may occasionally make
|
||||
backwards-incompatible changes.
|
||||
|
||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
|
||||
|
|
@ -16,8 +16,8 @@ Go packages for Google Cloud Platform services. Supported APIs are:
|
|||
|
||||
Google API | Status | Package
|
||||
-------------------------------|--------------|-----------------------------------------------------------
|
||||
[Datastore][cloud-datastore] | experimental | [`google.golang.org/cloud/datastore`][cloud-datastore-ref]
|
||||
[Cloud Storage][cloud-storage] | experimental | [`google.golang.org/cloud/storage`][cloud-storage-ref]
|
||||
[Datastore][cloud-datastore] | beta | [`google.golang.org/cloud/datastore`][cloud-datastore-ref]
|
||||
[Storage][cloud-storage] | beta | [`google.golang.org/cloud/storage`][cloud-storage-ref]
|
||||
[Pub/Sub][cloud-pubsub] | experimental | [`google.golang.org/cloud/pubsub`][cloud-pubsub-ref]
|
||||
[BigTable][cloud-bigtable] | stable | [`google.golang.org/cloud/bigtable`][cloud-bigtable-ref]
|
||||
[BigQuery][cloud-bigquery] | experimental | [`google.golang.org/cloud/bigquery`][cloud-bigquery-ref]
|
||||
|
|
@ -46,7 +46,7 @@ application to run in many environments without requiring explicit configuration
|
|||
Manually-configured authorization can be achieved using the
|
||||
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
|
||||
create an `oauth2.TokenSource`. This token source can be passed to the `NewClient`
|
||||
function for the relevant API using a
|
||||
function for the relevant API using a
|
||||
[`cloud.WithTokenSource`](https://godoc.org/google.golang.org/cloud#WithTokenSource)
|
||||
option.
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,68 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var (
|
||||
DefaultBackoffConfig = &BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
baseDelay: 1.0 * time.Second,
|
||||
factor: 1.6,
|
||||
jitter: 0.2,
|
||||
}
|
||||
)
|
||||
|
||||
// backoffStrategy defines the methodology for backing off after a grpc
|
||||
// connection failure.
|
||||
//
|
||||
// This is unexported until the GRPC project decides whether or not to allow
|
||||
// alternative backoff strategies. Once a decision is made, this type and its
|
||||
// method may be exported.
|
||||
type backoffStrategy interface {
|
||||
// backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default GRPC backoff strategy.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
|
||||
// TODO(stevvooe): The following fields are not exported, as allowing changes
|
||||
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay time.Duration
|
||||
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor float64
|
||||
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter float64
|
||||
}
|
||||
|
||||
func (bc *BackoffConfig) backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return bc.baseDelay
|
||||
}
|
||||
backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= bc.factor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
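The new backoff.go above replaces the hard-coded backoff constants that are removed from rpc_util.go further down, and WithBackoffConfig (added in clientconn.go below) is how a dialer overrides them. A usage sketch, under the APIs shown in this diff (the target address is a placeholder):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Retry failed connection attempts, but wait at most 10s between
	// attempts instead of the 120s DefaultBackoffConfig maximum.
	conn, err := grpc.Dial(
		"localhost:2379", // placeholder target
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(&grpc.BackoffConfig{MaxDelay: 10 * time.Second}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```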
||||
|
|
@ -75,6 +75,7 @@ type dialOptions struct {
|
|||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs backoffStrategy
|
||||
picker Picker
|
||||
block bool
|
||||
insecure bool
|
||||
|
|
@ -114,6 +115,22 @@ func WithPicker(p Picker) DialOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||
// parameters after connection failures.
|
||||
func WithBackoffConfig(b *BackoffConfig) DialOption {
|
||||
return withBackoff(b)
|
||||
}
|
||||
|
||||
// withBackoff sets the backoff strategy used for retries after a
|
||||
// failed connection attempt.
|
||||
//
|
||||
// This can be exported if arbitrary backoff strategies are allowed by GRPC.
|
||||
func withBackoff(bs backoffStrategy) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.bs = bs
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
|
||||
// connection is up. Without this, Dial returns immediately and connecting the server
|
||||
// happens in background.
|
||||
|
|
@ -180,6 +197,11 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
|||
// Set the default codec.
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = DefaultBackoffConfig
|
||||
}
|
||||
|
||||
if cc.dopts.picker == nil {
|
||||
cc.dopts.picker = &unicastPicker{
|
||||
target: target,
|
||||
|
|
@ -415,7 +437,7 @@ func (cc *Conn) resetTransport(closeTransport bool) error {
|
|||
return ErrClientConnTimeout
|
||||
}
|
||||
}
|
||||
sleepTime := backoff(retries)
|
||||
sleepTime := cc.dopts.bs.backoff(retries)
|
||||
timeout := sleepTime
|
||||
if timeout < minConnectTimeout {
|
||||
timeout = minConnectTimeout
|
||||
|
|
|
|||
|
|
@ -41,9 +41,7 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
|
|
@ -411,38 +409,6 @@ func convertCode(err error) codes.Code {
|
|||
return codes.Unknown
|
||||
}
|
||||
|
||||
const (
|
||||
// how long to wait after the first failure before retrying
|
||||
baseDelay = 1.0 * time.Second
|
||||
// upper bound of backoff delay
|
||||
maxDelay = 120 * time.Second
|
||||
// backoff increases by this factor on each retry
|
||||
backoffFactor = 1.6
|
||||
// backoff is randomized downwards by this factor
|
||||
backoffJitter = 0.2
|
||||
)
|
||||
|
||||
func backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(maxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= backoffFactor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + backoffJitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
||||
|
||||
// SupportPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the grpc package.
|
||||
//
|
||||
|
|
|
|||
|
|
@ -196,6 +196,28 @@ func (f *inFlow) onData(n uint32) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// adjustConnPendingUpdate increments the connection level pending updates by n.
|
||||
// This is called to make the proper connection level window updates when
|
||||
// receiving data frame targeting the canceled RPCs.
|
||||
func (f *inFlow) adjustConnPendingUpdate(n uint32) (uint32, error) {
|
||||
if n == 0 || f.conn != nil {
|
||||
return 0, nil
|
||||
}
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
if f.pendingData+f.pendingUpdate+n > f.limit {
|
||||
return 0, ConnectionErrorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
|
||||
}
|
||||
f.pendingUpdate += n
|
||||
if f.pendingUpdate >= f.limit/4 {
|
||||
ret := f.pendingUpdate
|
||||
f.pendingUpdate = 0
|
||||
return ret, nil
|
||||
}
|
||||
return 0, nil
|
||||
|
||||
}
|
||||
|
||||
// connOnRead updates the connection level states when the application consumes data.
|
||||
func (f *inFlow) connOnRead(n uint32) uint32 {
|
||||
if n == 0 || f.conn != nil {
|
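adjustConnPendingUpdate above keeps the connection-level flow-control window accurate for DATA frames that arrive on streams the transport no longer tracks, such as canceled RPCs: the bytes accumulate in pendingUpdate, and only once a quarter of the window limit is pending does the caller get a value to send as a WINDOW_UPDATE, batching updates instead of sending one per orphaned frame. With a 64 KB window, for example, an update goes out only after roughly 16 KB has accumulated. A tiny standalone model of that rule (a simplification, not the transport code):

```go
package main

import "fmt"

// pendingWindow models the batching rule used by adjustConnPendingUpdate:
// accumulate bytes, and release a window update once a quarter of the limit
// is pending.
type pendingWindow struct {
	limit   uint32
	pending uint32
}

func (w *pendingWindow) add(n uint32) (update uint32) {
	w.pending += n
	if w.pending >= w.limit/4 {
		update = w.pending
		w.pending = 0
	}
	return update
}

func main() {
	w := &pendingWindow{limit: 65535} // e.g. a 64 KB connection window
	for i, frame := range []uint32{4096, 4096, 4096, 4096, 4096} {
		if u := w.add(frame); u > 0 {
			fmt.Printf("frame %d: send WINDOW_UPDATE for %d bytes\n", i, u)
		} else {
			fmt.Printf("frame %d: defer (%d bytes pending)\n", i, w.pending)
		}
	}
}
```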
||||
|
|
|
|||
|
|
@ -236,9 +236,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
var timeout time.Duration
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
timeout = dl.Sub(time.Now())
|
||||
if timeout <= 0 {
|
||||
return nil, ContextErr(context.DeadlineExceeded)
|
||||
}
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, ContextErr(err)
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.conn.RemoteAddr(),
|
||||
|
|
@ -571,11 +571,19 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
|||
|
||||
func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
// Select the right stream to dispatch.
|
||||
size := len(f.Data())
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
cwu, err := t.fc.adjustConnPendingUpdate(uint32(size))
|
||||
if err != nil {
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
if cwu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, cwu})
|
||||
}
|
||||
return
|
||||
}
|
||||
size := len(f.Data())
|
||||
if size > 0 {
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
|
|
|
|||
|
|
@ -318,11 +318,20 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
|
|||
|
||||
func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||
// Select the right stream to dispatch.
|
||||
size := len(f.Data())
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
cwu, err := t.fc.adjustConnPendingUpdate(uint32(size))
|
||||
if err != nil {
|
||||
grpclog.Printf("transport: http2Server %v", err)
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
if cwu > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, cwu})
|
||||
}
|
||||
return
|
||||
}
|
||||
size := len(f.Data())
|
||||
if size > 0 {
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {